From 223ac2f42d49dd0324ca02ea15897ead1a2f5133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Mon, 31 Mar 2008 04:22:53 +0200 Subject: x86, pci: fix off-by-one errors in some pirq warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix bogus pirq warnings reported in: http://bugzilla.kernel.org/show_bug.cgi?id=10366 safe to be backported to v2.6.25 and earlier. Cc: stable@kernel.org Signed-off-by: Björn Steinbrink Signed-off-by: Ingo Molnar --- arch/x86/pci/irq.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index a8715861877e..579745ca6b66 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -200,7 +200,7 @@ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; - WARN_ON_ONCE(pirq >= 16); + WARN_ON_ONCE(pirq > 16); return irqmap[read_config_nybble(router, 0x48, pirq-1)]; } @@ -209,7 +209,7 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; unsigned int val = irqmap[irq]; - WARN_ON_ONCE(pirq >= 16); + WARN_ON_ONCE(pirq > 16); if (val) { write_config_nybble(router, 0x48, pirq-1, val); return 1; @@ -260,7 +260,7 @@ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; - WARN_ON_ONCE(pirq >= 5); + WARN_ON_ONCE(pirq > 5); return read_config_nybble(router, 0x55, pirqmap[pirq-1]); } @@ -268,7 +268,7 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; - WARN_ON_ONCE(pirq >= 5); + WARN_ON_ONCE(pirq > 5); write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); return 1; } @@ -282,7 +282,7 @@ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; - WARN_ON_ONCE(pirq >= 4); + WARN_ON_ONCE(pirq > 4); return read_config_nybble(router,0x43, pirqmap[pirq-1]); } @@ -290,7 +290,7 @@ static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; - WARN_ON_ONCE(pirq >= 4); + WARN_ON_ONCE(pirq > 4); write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); return 1; } -- cgit v1.2.1 From b4e0409a36f4533770a12095bde2a574a08a319e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 13:45:16 +0100 Subject: x86: check vmlinux limits, 64-bit these build-time and link-time checks would have prevented the vmlinux size regression. Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 13 +++++++++++++ arch/x86/kernel/vmlinux_64.lds.S | 6 ++++++ arch/x86/mm/fault.c | 4 ---- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index ad2440832de0..38f32e798a99 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -82,6 +82,19 @@ void __init x86_64_start_kernel(char * real_mode_data) { int i; + /* + * Build-time sanity checks on the kernel image and module + * area mappings. 
(these are purely build-time and produce no code) + */ + BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START); + BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE); + BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE); + BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0); + BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); + BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); + BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == + (__START_KERNEL & PGDIR_MASK))); + /* clear bss before set_intr_gate with early_idt_handler */ clear_bss(); diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index fab132299735..4c369451007b 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -247,3 +247,9 @@ SECTIONS DWARF_DEBUG } + +/* + * Build-time check on the image size: + */ +ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE") diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index ec08d8389850..81fcbeec3892 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -976,9 +976,5 @@ void vmalloc_sync_all(void) if (address == start) start = address + PGDIR_SIZE; } - /* Check that there is no need to do the same for the modules area. */ - BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); - BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == - (__START_KERNEL & PGDIR_MASK))); #endif } -- cgit v1.2.1 From 85eb69a16aab5a394ce043c2131319eae35e6493 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 12:50:51 +0100 Subject: x86: increase the kernel text limit to 512 MB people sometimes do crazy stuff like building really large static arrays into their kernels or building allyesconfig kernels. Give more space to the kernel and push modules up a bit: kernel has 512 MB and modules have 1.5 GB. Should be enough for a few years ;-) Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 4 ++-- include/asm-x86/page_64.h | 4 ++-- include/asm-x86/pgtable_64.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a007454133a3..017216916dff 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -383,12 +383,12 @@ NEXT_PAGE(level2_ident_pgt) NEXT_PAGE(level2_kernel_pgt) /* - * 128 MB kernel mapping. We spend a full page on this pagetable + * 512 MB kernel mapping. We spend a full page on this pagetable * anyway. * * The kernel code+data+bss must not be bigger than that. * - * (NOTE: at +128MB starts the module area, see MODULES_VADDR. + * (NOTE: at +512MB starts the module area, see MODULES_VADDR. * If you want to increase this then increase MODULES_VADDR * too.) 
*/ diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 143546073b95..aee05c616e05 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -48,10 +48,10 @@ #define __VIRTUAL_MASK_SHIFT 48 /* - * Kernel image size is limited to 128 MB (see level2_kernel_pgt in + * Kernel image size is limited to 512 MB (see level2_kernel_pgt in * arch/x86/kernel/head_64.S), and it is mapped here: */ -#define KERNEL_IMAGE_SIZE (128*1024*1024) +#define KERNEL_IMAGE_SIZE (512*1024*1024) #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) #ifndef __ASSEMBLY__ diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 0a0b77bc736a..01d2359e7a34 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -140,7 +140,7 @@ static inline void native_pgd_clear(pgd_t * pgd) #define VMALLOC_START _AC(0xffffc20000000000, UL) #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) -#define MODULES_VADDR _AC(0xffffffff88000000, UL) +#define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xfffffffffff00000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) -- cgit v1.2.1 From 1a3e4ca41c5a38975023a6e8831c309d3322889c Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Wed, 9 Apr 2008 01:29:27 -0700 Subject: x86 vDSO: don't use disabled vDSO for signal trampoline If the vDSO was not mapped, don't use it as the "restorer" for a signal handler. Whether we have a pointer in mm->context.vdso depends on what happened at exec time, so we shouldn't check any global flags now. Background: Currently, every 32-bit exec gets the vDSO mapped even if it's disabled (the process just doesn't get told about it). Because it's in fact always there, the bug that this patch fixes cannot happen now. With the second patch, it won't be mapped at all when it's disabled, which is one of the things that people might really want when they disable it (so nothing they didn't ask for goes into their address space). The 32-bit signal handler setup when SA_RESTORER is not used refers to current->mm->context.vdso without regard to whether the vDSO has been disabled when the process was exec'd. This patch fixes this not to use it when it's null, which becomes possible after the second patch. (This never happens in normal use, because glibc's sigaction call uses SA_RESTORER unless glibc detected the vDSO.) 
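The resulting selection order can be sketched as a standalone helper. This is an
illustration only -- pick_restorer() is a hypothetical name, and the real logic sits
inline in ia32_setup_frame() and setup_frame() -- but SA_RESTORER,
current->mm->context.vdso, VDSO32_SYMBOL() and frame->retcode are the identifiers
that appear in the diff below:

static void __user *pick_restorer(struct k_sigaction *ka,
                                  struct sigframe __user *frame)
{
        /* 1) honor an explicit restorer from userspace (glibc's usual case) */
        if (ka->sa.sa_flags & SA_RESTORER)
                return ka->sa.sa_restorer;
        /* 2) use the vDSO sigreturn stub only if a vDSO is actually mapped */
        if (current->mm->context.vdso)
                return VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
        /* 3) last resort: the sigreturn stub placed on the signal stack */
        return &frame->retcode;
}

The old test, current->binfmt->hasvdso, asked whether the binary format supports a
vDSO at all, not whether this particular process has one mapped -- exactly the
distinction that matters once a disabled vDSO is no longer mapped.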
Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 2 +- arch/x86/kernel/signal_32.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 5e7771a3ba2f..05e155d3fb6c 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -468,7 +468,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, restorer = ka->sa.sa_restorer; } else { /* Return stub is in 32bit vsyscall page */ - if (current->binfmt->hasvdso) + if (current->mm->context.vdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 0157a6f0f41f..011c62fa563c 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -365,7 +365,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, goto give_sigsegv; } - if (current->binfmt->hasvdso) + if (current->mm->context.vdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; -- cgit v1.2.1 From 5de253cc5b1f565f7aeb5bacd67bac37e943ceef Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Wed, 9 Apr 2008 01:30:06 -0700 Subject: x86 vDSO: don't map 32-bit vdso when disabled We map a VMA for the 32-bit vDSO even when it's disabled, which is stupid. For the 32-bit kernel it's the vdso_enabled boot parameter/sysctl and for the 64-bit kernel it's the vdso32 boot parameter/syscall32 sysctl. When it's disabled, we don't pass AT_SYSINFO_EHDR so processes don't use the vDSO for anything, but we still map it. For the non-compat vDSO, this means we're always putting an extra VMA somewhere, maybe lousing up the control of the address space the user was hoping for. Honor the setting by doing nothing in arch_setup_additional_pages. [ also see: "x86 vDSO: don't use disabled vDSO for signal trampoline" ] Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/vdso/vdso32-setup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 348f1341e1c8..f7e78d84fc01 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -325,6 +325,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) int ret = 0; bool compat; + if (vdso_enabled == VDSO_DISABLED) + return 0; + down_write(&mm->mmap_sem); /* Test compat mode once here, in case someone -- cgit v1.2.1 From ee7ae7a1981caaa4a5b14d8c75692a9dccd52105 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: add debug info to DEBUG_PAGEALLOC Add debug information for DEBUG_PAGEALLOC to get some statistics about the pool usage and split status. 
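With CONFIG_DEBUG_FS enabled, the patch below exposes the counters through a single
debugfs file. Reading it -- normally at /sys/kernel/debug/debug_pagealloc once
debugfs is mounted -- would produce output shaped roughly like this; the figures are
invented placeholders, only the field names come from the seq_printf() calls in the
diff:

        # mount -t debugfs none /sys/kernel/debug
        # cat /sys/kernel/debug/debug_pagealloc
        DEBUG_PAGEALLOC
        pool_size : 1024
        pool_pages : 962
        pool_low : 128
        pool_used : 57
        pool_failed : 0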
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/pageattr.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7b79f6be4e7d..6cdfc0fd68be 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include @@ -918,6 +920,45 @@ void kernel_map_pages(struct page *page, int numpages, int enable) cpa_fill_pool(NULL); } +#ifdef CONFIG_DEBUG_FS +static int dpa_show(struct seq_file *m, void *v) +{ + seq_puts(m, "DEBUG_PAGEALLOC\n"); + seq_printf(m, "pool_size : %lu\n", pool_size); + seq_printf(m, "pool_pages : %lu\n", pool_pages); + seq_printf(m, "pool_low : %lu\n", pool_low); + seq_printf(m, "pool_used : %lu\n", pool_used); + seq_printf(m, "pool_failed : %lu\n", pool_failed); + + return 0; +} + +static int dpa_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, dpa_show, NULL); +} + +static const struct file_operations dpa_fops = { + .open = dpa_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int __init debug_pagealloc_proc_init(void) +{ + struct dentry *de; + + de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL, + &dpa_fops); + if (!de) + return -ENOMEM; + + return 0; +} +__initcall(debug_pagealloc_proc_init); +#endif + #ifdef CONFIG_HIBERNATION bool kernel_page_present(struct page *page) -- cgit v1.2.1 From 2596e0fae094be9354b29ddb17e6326a18012e8c Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: unify arch/x86/mm/Makefile Unify arch/x86/mm/Makefile between 32 and 64 bits. All configuration variables that are protected by Kconfig constraints have been put in the common part of the Makefile; however, the NUMA files are totally different between 32 and 64 bits and are handled via an ifdef. Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/Makefile | 14 ++++++++++++-- arch/x86/mm/Makefile_32 | 9 --------- arch/x86/mm/Makefile_64 | 9 --------- 3 files changed, 12 insertions(+), 20 deletions(-) delete mode 100644 arch/x86/mm/Makefile_32 delete mode 100644 arch/x86/mm/Makefile_64 diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 983291096848..8e81660604bc 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,5 +1,15 @@ +obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o + +obj-$(CONFIG_X86_32) += pgtable_32.o + +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o + +obj-$(CONFIG_HIGHMEM) += highmem_32.o + ifeq ($(CONFIG_X86_32),y) -include ${srctree}/arch/x86/mm/Makefile_32 +obj-$(CONFIG_NUMA) += discontig_32.o else -include ${srctree}/arch/x86/mm/Makefile_64 +obj-$(CONFIG_NUMA) += numa_64.o +obj-$(CONFIG_K8_NUMA) += k8topology_64.o +obj-$(CONFIG_ACPI_NUMA) += srat_64.o endif diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32 deleted file mode 100644 index c36ae88bb543..000000000000 --- a/arch/x86/mm/Makefile_32 +++ /dev/null @@ -1,9 +0,0 @@ -# -# Makefile for the linux i386-specific parts of the memory manager. 
-# - -obj-y := init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o - -obj-$(CONFIG_NUMA) += discontig_32.o -obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_HIGHMEM) += highmem_32.o diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64 deleted file mode 100644 index 688c8c28ac8f..000000000000 --- a/arch/x86/mm/Makefile_64 +++ /dev/null @@ -1,9 +0,0 @@ -# -# Makefile for the linux x86_64-specific parts of the memory manager. -# - -obj-y := init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o -obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_NUMA) += numa_64.o -obj-$(CONFIG_K8_NUMA) += k8topology_64.o -obj-$(CONFIG_ACPI_NUMA) += srat_64.o -- cgit v1.2.1 From 926e5392ba8a388ae32ca0d2714cc2c73945c609 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: add code to dump the (kernel) page tables for visual inspection by kernel developers This patch adds code to the kernel to have an (optional) /proc/kernel_page_tables debug file that basically dumps the kernel pagetables; this allows us kernel developers to verify that nothing fishy is going on and that the various mappings are set up correctly. This was quite useful in finding various change_page_attr() bugs, and is very likely to be useful in the future as well. Signed-off-by: Arjan van de Ven Cc: mingo@elte.hu Cc: tglx@tglx.de Cc: hpa@zytor.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig.debug | 12 ++ arch/x86/mm/Makefile | 1 + arch/x86/mm/dump_pagetables.c | 301 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 314 insertions(+) create mode 100644 arch/x86/mm/dump_pagetables.c diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 702eb39901ca..cb7002eca887 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -54,6 +54,18 @@ config DEBUG_PER_CPU_MAPS Say N if unsure. +config X86_PTDUMP + bool "Export kernel pagetable layout to userspace via debugfs" + depends on X86_64 + select DEBUG_FS + help + Say Y here if you want to show the kernel pagetable layout in a + debugfs file. This information is only useful for kernel developers + who are working in architecture specific areas of the kernel. + It is probably not a good idea to enable this feature in a production + kernel. + If in doubt, say "N" + config DEBUG_RODATA bool "Write protect kernel read-only data structures" default y diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 8e81660604bc..28632f42ca66 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -12,4 +12,5 @@ else obj-$(CONFIG_NUMA) += numa_64.o obj-$(CONFIG_K8_NUMA) += k8topology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_64.o +obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o endif diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c new file mode 100644 index 000000000000..5e7f6430c27e --- /dev/null +++ b/arch/x86/mm/dump_pagetables.c @@ -0,0 +1,301 @@ +/* + * Debug helper to dump the current kernel pagetables of the system + * so that we can see what the various memory ranges are set to. + * + * (C) Copyright 2008 Intel Corporation + * + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#include +#include +#include + +#include + +/* + * The dumper groups pagetable entries of the same type into one, and for + * that it needs to keep some state when walking, and flush this state + * when a "break" in the continuity is found. + */ +struct pg_state { + int level; + pgprot_t current_prot; + unsigned long start_address; + unsigned long current_address; + int printed_vmalloc; + int printed_modules; + int printed_vmemmap; + int printed_highmap; +}; + +/* Multipliers for offsets within the PTEs */ +#define LEVEL_4_MULT (PAGE_SIZE) +#define LEVEL_3_MULT (512UL * LEVEL_4_MULT) +#define LEVEL_2_MULT (512UL * LEVEL_3_MULT) +#define LEVEL_1_MULT (512UL * LEVEL_2_MULT) + + +/* + * Print a readable form of a pgprot_t to the seq_file + */ +static void printk_prot(struct seq_file *m, pgprot_t prot, int level) +{ + unsigned long pr = pgprot_val(prot); + + if (pr & _PAGE_USER) + seq_printf(m, "USR "); + else + seq_printf(m, " "); + if (pr & _PAGE_RW) + seq_printf(m, "RW "); + else + seq_printf(m, "ro "); + if (pr & _PAGE_PWT) + seq_printf(m, "PWT "); + else + seq_printf(m, " "); + if (pr & _PAGE_PCD) + seq_printf(m, "PCD "); + else + seq_printf(m, " "); + + /* Bit 9 has a different meaning on level 3 vs 4 */ + if (level <= 3) { + if (pr & _PAGE_PSE) + seq_printf(m, "PSE "); + else + seq_printf(m, " "); + } else { + if (pr & _PAGE_PAT) + seq_printf(m, "pat "); + else + seq_printf(m, " "); + } + if (pr & _PAGE_GLOBAL) + seq_printf(m, "GLB "); + else + seq_printf(m, " "); + if (pr & _PAGE_NX) + seq_printf(m, "NX "); + else + seq_printf(m, "x "); +} + +/* + * Sign-extend the 48 bit address to 64 bit + */ +static unsigned long sign_extend(unsigned long u) +{ + if (u>>47) + u = u | (0xffffUL << 48); + return u; +} + +/* + * This function gets called on a break in a continuous series + * of PTE entries; the next one is different so we need to + * print what we collected so far. + */ +static void note_page(struct seq_file *m, struct pg_state *st, + pgprot_t new_prot, int level) +{ + unsigned long prot, cur; + + /* + * If we have a "break" in the series, we need to flush the state that + * we have now. "break" is either changing perms or a different level. + */ + prot = pgprot_val(new_prot) & ~(PTE_MASK); + cur = pgprot_val(st->current_prot) & ~(PTE_MASK); + + if ((prot != cur || level != st->level) && + st->current_address != st->start_address) { + char unit = 'K'; + unsigned long delta; + + /* + * We print markers for special areas of address space, + * such as the start of vmalloc space etc. + * This helps in the interpretation. 
+ */ + if (!st->printed_vmalloc && + st->start_address >= VMALLOC_START) { + seq_printf(m, "---[ VMALLOC SPACE ]---\n"); + st->printed_vmalloc = 1; + } + if (!st->printed_modules && + st->start_address >= MODULES_VADDR) { + seq_printf(m, "---[ MODULES SPACE ]---\n"); + st->printed_modules = 1; + } + if (st->printed_modules < 2 && + st->start_address >= MODULES_END) { + seq_printf(m, "---[ END MODULES SPACE ]---\n"); + st->printed_modules = 2; + } + if (!st->printed_vmemmap && + st->start_address >= VMEMMAP_START) { + seq_printf(m, "---[ VMMEMMAP SPACE ]---\n"); + st->printed_vmemmap = 1; + } + if (!st->printed_highmap && + st->start_address >= __START_KERNEL_map) { + seq_printf(m, "---[ HIGH KERNEL MAPPING ]---\n"); + st->printed_highmap = 1; + } + + /* + * Now print the actual finished series + */ + seq_printf(m, "[ %016lx - %016lx ", + st->start_address, st->current_address); + + delta = (st->current_address - st->start_address) >> 10; + if ((delta & 1023) == 0) { + delta = delta >> 10; + unit = 'M'; + } + if (pgprot_val(st->current_prot)) { + seq_printf(m, "Size %9lu%cb ", delta, unit); + printk_prot(m, st->current_prot, st->level); + seq_printf(m, "L%i]\n", st->level); + } else { + /* don't print protections on non-present memory */ + seq_printf(m, "%14lu%cb", delta, unit); + seq_printf(m, " L%i]\n", + st->level); + } + st->start_address = st->current_address; + st->current_prot = new_prot; + st->level = level; + }; +} + +static void walk_level_4(struct seq_file *m, struct pg_state *st, pmd_t addr, + unsigned long P) +{ + int i; + pte_t *start; + + start = (pte_t *) pmd_page_vaddr(addr); + for (i = 0; i < PTRS_PER_PTE; i++) { + pgprot_t prot = pte_pgprot(*start); + + st->current_address = sign_extend(P + i * LEVEL_4_MULT); + note_page(m, st, prot, 4); + start++; + } +} + + +static void walk_level_3(struct seq_file *m, struct pg_state *st, pud_t addr, + unsigned long P) +{ + int i; + pmd_t *start; + + start = (pmd_t *) pud_page_vaddr(addr); + for (i = 0; i < PTRS_PER_PMD; i++) { + st->current_address = sign_extend(P + i * LEVEL_3_MULT); + if (!pmd_none(*start)) { + unsigned long prot; + + prot = pmd_val(*start) & ~(PTE_MASK); + /* Deal with 2Mb pages */ + if (pmd_large(*start)) + note_page(m, st, __pgprot(prot), 3); + else + walk_level_4(m, st, *start, + P + i * LEVEL_3_MULT); + } else + note_page(m, st, __pgprot(0), 3); + start++; + } +} + + +static void walk_level_2(struct seq_file *m, struct pg_state *st, pgd_t addr, + unsigned long P) +{ + int i; + pud_t *start; + + start = (pud_t *) pgd_page_vaddr(addr); + + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_none(*start)) { + unsigned long prot; + + prot = pud_val(*start) & ~(PTE_MASK); + /* Deal with 1Gb pages */ + if (pud_large(*start)) + note_page(m, st, __pgprot(prot), 2); + else + walk_level_3(m, st, *start, + P + i * LEVEL_2_MULT); + } else + note_page(m, st, __pgprot(0), 2); + + start++; + } +} + +static void walk_level_1(struct seq_file *m) +{ + pgd_t *start = (pgd_t *) &init_level4_pgt; + int i; + struct pg_state st; + + memset(&st, 0, sizeof(st)); + st.level = 1; + + for (i = 0; i < PTRS_PER_PGD; i++) { + if (!pgd_none(*start)) + walk_level_2(m, &st, *start, i * LEVEL_1_MULT); + else + note_page(m, &st, __pgprot(0), 1); + start++; + } +} + +static int ptdump_show(struct seq_file *m, void *v) +{ + seq_puts(m, "Kernel pagetable dump\n"); + walk_level_1(m); + return 0; +} + +static int ptdump_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show, NULL); +} + +static const struct file_operations 
ptdump_fops = { + .open = ptdump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int pt_dump_init(void) +{ + struct dentry *pe; + + pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL, + &ptdump_fops); + if (!pe) + return -ENOMEM; + + return 0; +} + +__initcall(pt_dump_init); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arjan van de Ven "); +MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables"); -- cgit v1.2.1 From fe770bf0310d90b3b033c19044d45b7de5f2041c Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: clean up the page table dumper and add 32-bit support Clean up the page table dumper (fix boundary conditions, table driven address ranges, some formatting changes since it is no longer using the kernel log but a separate virtual file), and generalize to 32 bits. [ mingo@elte.hu: x86: fix the pagetable dumper ] Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig.debug | 2 +- arch/x86/mm/Makefile | 2 +- arch/x86/mm/dump_pagetables.c | 301 +++++++++++++++++++++++++----------------- 3 files changed, 179 insertions(+), 126 deletions(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index cb7002eca887..7ce8e7025661 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -56,7 +56,7 @@ config DEBUG_PER_CPU_MAPS config X86_PTDUMP bool "Export kernel pagetable layout to userspace via debugfs" - depends on X86_64 + depends on DEBUG_KERNEL select DEBUG_FS help Say Y here if you want to show the kernel pagetable layout in a diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 28632f42ca66..9ab9889863f0 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -3,6 +3,7 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o obj-$(CONFIG_X86_32) += pgtable_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o @@ -12,5 +13,4 @@ else obj-$(CONFIG_NUMA) += numa_64.o obj-$(CONFIG_K8_NUMA) += k8topology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_64.o -obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o endif diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 5e7f6430c27e..6d840338f808 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -12,9 +12,10 @@ * of the License. 
*/ +#include +#include #include #include -#include #include @@ -28,73 +29,107 @@ struct pg_state { pgprot_t current_prot; unsigned long start_address; unsigned long current_address; - int printed_vmalloc; - int printed_modules; - int printed_vmemmap; - int printed_highmap; + const struct addr_marker *marker; }; -/* Multipliers for offsets within the PTEs */ -#define LEVEL_4_MULT (PAGE_SIZE) -#define LEVEL_3_MULT (512UL * LEVEL_4_MULT) -#define LEVEL_2_MULT (512UL * LEVEL_3_MULT) -#define LEVEL_1_MULT (512UL * LEVEL_2_MULT) +struct addr_marker { + unsigned long start_address; + const char *name; +}; + +/* Address space markers hints */ +static struct addr_marker address_markers[] = { + { 0, "User Space" }, +#ifdef CONFIG_X86_64 + { 0x8000000000000000UL, "Kernel Space" }, + { 0xffff810000000000UL, "Low Kernel Mapping" }, + { VMALLOC_START, "vmalloc() Area" }, + { MODULES_VADDR, "Modules" }, + { MODULES_END, "End Modules" }, + { VMEMMAP_START, "Vmemmap" }, + { __START_KERNEL_map, "High Kernel Mapping" }, +#else + { PAGE_OFFSET, "Kernel Mapping" }, + { 0/* VMALLOC_START */, "vmalloc() Area" }, + { 0/*VMALLOC_END*/, "vmalloc() End" }, +# ifdef CONFIG_HIGHMEM + { 0/*PKMAP_BASE*/, "Persisent kmap() Area" }, +# endif + { 0/*FIXADDR_START*/, "Fixmap Area" }, +#endif + { -1, NULL } /* End of list */ +}; +/* Multipliers for offsets within the PTEs */ +#define PTE_LEVEL_MULT (PAGE_SIZE) +#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) +#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT) +#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT) /* * Print a readable form of a pgprot_t to the seq_file */ static void printk_prot(struct seq_file *m, pgprot_t prot, int level) { - unsigned long pr = pgprot_val(prot); - - if (pr & _PAGE_USER) - seq_printf(m, "USR "); - else - seq_printf(m, " "); - if (pr & _PAGE_RW) - seq_printf(m, "RW "); - else - seq_printf(m, "ro "); - if (pr & _PAGE_PWT) - seq_printf(m, "PWT "); - else - seq_printf(m, " "); - if (pr & _PAGE_PCD) - seq_printf(m, "PCD "); - else - seq_printf(m, " "); - - /* Bit 9 has a different meaning on level 3 vs 4 */ - if (level <= 3) { - if (pr & _PAGE_PSE) - seq_printf(m, "PSE "); + pgprotval_t pr = pgprot_val(prot); + static const char * const level_name[] = + { "cr3", "pgd", "pud", "pmd", "pte" }; + + if (!pgprot_val(prot)) { + /* Not present */ + seq_printf(m, " "); + } else { + if (pr & _PAGE_USER) + seq_printf(m, "USR "); else seq_printf(m, " "); - } else { - if (pr & _PAGE_PAT) - seq_printf(m, "pat "); + if (pr & _PAGE_RW) + seq_printf(m, "RW "); + else + seq_printf(m, "ro "); + if (pr & _PAGE_PWT) + seq_printf(m, "PWT "); + else + seq_printf(m, " "); + if (pr & _PAGE_PCD) + seq_printf(m, "PCD "); else seq_printf(m, " "); + + /* Bit 9 has a different meaning on level 3 vs 4 */ + if (level <= 3) { + if (pr & _PAGE_PSE) + seq_printf(m, "PSE "); + else + seq_printf(m, " "); + } else { + if (pr & _PAGE_PAT) + seq_printf(m, "pat "); + else + seq_printf(m, " "); + } + if (pr & _PAGE_GLOBAL) + seq_printf(m, "GLB "); + else + seq_printf(m, " "); + if (pr & _PAGE_NX) + seq_printf(m, "NX "); + else + seq_printf(m, "x "); } - if (pr & _PAGE_GLOBAL) - seq_printf(m, "GLB "); - else - seq_printf(m, " "); - if (pr & _PAGE_NX) - seq_printf(m, "NX "); - else - seq_printf(m, "x "); + seq_printf(m, "%s\n", level_name[level]); } /* - * Sign-extend the 48 bit address to 64 bit + * On 64 bits, sign-extend the 48 bit address to 64 bit */ -static unsigned long sign_extend(unsigned long u) +static unsigned long normalize_addr(unsigned long u) { - if (u>>47) - 
u = u | (0xffffUL << 48); +#ifdef CONFIG_X86_64 + return (signed long)(u << 16) >> 16; +#else return u; +#endif } /* @@ -103,81 +138,62 @@ static unsigned long sign_extend(unsigned long u) * print what we collected so far. */ static void note_page(struct seq_file *m, struct pg_state *st, - pgprot_t new_prot, int level) + pgprot_t new_prot, int level) { - unsigned long prot, cur; + pgprotval_t prot, cur; + static const char units[] = "KMGTPE"; /* * If we have a "break" in the series, we need to flush the state that - * we have now. "break" is either changing perms or a different level. + * we have now. "break" is either changing perms, levels or + * address space marker. */ prot = pgprot_val(new_prot) & ~(PTE_MASK); cur = pgprot_val(st->current_prot) & ~(PTE_MASK); - if ((prot != cur || level != st->level) && - st->current_address != st->start_address) { - char unit = 'K'; + if (!st->level) { + /* First entry */ + st->current_prot = new_prot; + st->level = level; + st->marker = address_markers; + seq_printf(m, "---[ %s ]---\n", st->marker->name); + } else if (prot != cur || level != st->level || + st->current_address >= st->marker[1].start_address) { + const char *unit = units; unsigned long delta; - /* - * We print markers for special areas of address space, - * such as the start of vmalloc space etc. - * This helps in the interpretation. - */ - if (!st->printed_vmalloc && - st->start_address >= VMALLOC_START) { - seq_printf(m, "---[ VMALLOC SPACE ]---\n"); - st->printed_vmalloc = 1; - } - if (!st->printed_modules && - st->start_address >= MODULES_VADDR) { - seq_printf(m, "---[ MODULES SPACE ]---\n"); - st->printed_modules = 1; - } - if (st->printed_modules < 2 && - st->start_address >= MODULES_END) { - seq_printf(m, "---[ END MODULES SPACE ]---\n"); - st->printed_modules = 2; - } - if (!st->printed_vmemmap && - st->start_address >= VMEMMAP_START) { - seq_printf(m, "---[ VMMEMMAP SPACE ]---\n"); - st->printed_vmemmap = 1; - } - if (!st->printed_highmap && - st->start_address >= __START_KERNEL_map) { - seq_printf(m, "---[ HIGH KERNEL MAPPING ]---\n"); - st->printed_highmap = 1; - } - /* * Now print the actual finished series */ - seq_printf(m, "[ %016lx - %016lx ", - st->start_address, st->current_address); + seq_printf(m, "0x%p-0x%p ", + (void *)st->start_address, + (void *)st->current_address); delta = (st->current_address - st->start_address) >> 10; - if ((delta & 1023) == 0) { - delta = delta >> 10; - unit = 'M'; + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; } - if (pgprot_val(st->current_prot)) { - seq_printf(m, "Size %9lu%cb ", delta, unit); - printk_prot(m, st->current_prot, st->level); - seq_printf(m, "L%i]\n", st->level); - } else { - /* don't print protections on non-present memory */ - seq_printf(m, "%14lu%cb", delta, unit); - seq_printf(m, " L%i]\n", - st->level); + seq_printf(m, "%9lu%c ", delta, *unit); + printk_prot(m, st->current_prot, st->level); + + /* + * We print markers for special areas of address space, + * such as the start of vmalloc space etc. + * This helps in the interpretation. 
+ */ + if (st->current_address >= st->marker[1].start_address) { + st->marker++; + seq_printf(m, "---[ %s ]---\n", st->marker->name); } + st->start_address = st->current_address; st->current_prot = new_prot; st->level = level; - }; + } } -static void walk_level_4(struct seq_file *m, struct pg_state *st, pmd_t addr, +static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, unsigned long P) { int i; @@ -187,14 +203,15 @@ static void walk_level_4(struct seq_file *m, struct pg_state *st, pmd_t addr, for (i = 0; i < PTRS_PER_PTE; i++) { pgprot_t prot = pte_pgprot(*start); - st->current_address = sign_extend(P + i * LEVEL_4_MULT); + st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); note_page(m, st, prot, 4); start++; } } +#if PTRS_PER_PMD > 1 -static void walk_level_3(struct seq_file *m, struct pg_state *st, pud_t addr, +static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P) { int i; @@ -202,25 +219,30 @@ static void walk_level_3(struct seq_file *m, struct pg_state *st, pud_t addr, start = (pmd_t *) pud_page_vaddr(addr); for (i = 0; i < PTRS_PER_PMD; i++) { - st->current_address = sign_extend(P + i * LEVEL_3_MULT); + st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); if (!pmd_none(*start)) { - unsigned long prot; + pgprotval_t prot = pmd_val(*start) & ~PTE_MASK; - prot = pmd_val(*start) & ~(PTE_MASK); - /* Deal with 2Mb pages */ - if (pmd_large(*start)) + if (pmd_large(*start) || !pmd_present(*start)) note_page(m, st, __pgprot(prot), 3); else - walk_level_4(m, st, *start, - P + i * LEVEL_3_MULT); + walk_pte_level(m, st, *start, + P + i * PMD_LEVEL_MULT); } else note_page(m, st, __pgprot(0), 3); start++; } } +#else +#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p) +#define pud_large(a) pmd_large(__pmd(pud_val(a))) +#define pud_none(a) pmd_none(__pmd(pud_val(a))) +#endif -static void walk_level_2(struct seq_file *m, struct pg_state *st, pgd_t addr, +#if PTRS_PER_PUD > 1 + +static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P) { int i; @@ -229,16 +251,15 @@ static void walk_level_2(struct seq_file *m, struct pg_state *st, pgd_t addr, start = (pud_t *) pgd_page_vaddr(addr); for (i = 0; i < PTRS_PER_PUD; i++) { + st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); if (!pud_none(*start)) { - unsigned long prot; + pgprotval_t prot = pud_val(*start) & ~PTE_MASK; - prot = pud_val(*start) & ~(PTE_MASK); - /* Deal with 1Gb pages */ - if (pud_large(*start)) + if (pud_large(*start) || !pud_present(*start)) note_page(m, st, __pgprot(prot), 2); else - walk_level_3(m, st, *start, - P + i * LEVEL_2_MULT); + walk_pmd_level(m, st, *start, + P + i * PUD_LEVEL_MULT); } else note_page(m, st, __pgprot(0), 2); @@ -246,28 +267,48 @@ static void walk_level_2(struct seq_file *m, struct pg_state *st, pgd_t addr, } } -static void walk_level_1(struct seq_file *m) +#else +#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p) +#define pgd_large(a) pud_large(__pud(pgd_val(a))) +#define pgd_none(a) pud_none(__pud(pgd_val(a))) +#endif + +static void walk_pgd_level(struct seq_file *m) { +#ifdef CONFIG_X86_64 pgd_t *start = (pgd_t *) &init_level4_pgt; +#else + pgd_t *start = swapper_pg_dir; +#endif int i; struct pg_state st; memset(&st, 0, sizeof(st)); - st.level = 1; for (i = 0; i < PTRS_PER_PGD; i++) { - if (!pgd_none(*start)) - walk_level_2(m, &st, *start, i * LEVEL_1_MULT); - else + st.current_address = normalize_addr(i * PGD_LEVEL_MULT); + if 
(!pgd_none(*start)) { + pgprotval_t prot = pgd_val(*start) & ~PTE_MASK; + + if (pgd_large(*start) || !pgd_present(*start)) + note_page(m, &st, __pgprot(prot), 1); + else + walk_pud_level(m, &st, *start, + i * PGD_LEVEL_MULT); + } else note_page(m, &st, __pgprot(0), 1); + start++; } + + /* Flush out the last page */ + st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT); + note_page(m, &st, __pgprot(0), 0); } static int ptdump_show(struct seq_file *m, void *v) { - seq_puts(m, "Kernel pagetable dump\n"); - walk_level_1(m); + walk_pgd_level(m); return 0; } @@ -287,6 +328,18 @@ int pt_dump_init(void) { struct dentry *pe; +#ifdef CONFIG_X86_32 + /* Not a compile-time constant on x86-32 */ + address_markers[2].start_address = VMALLOC_START; + address_markers[3].start_address = VMALLOC_END; +# ifdef CONFIG_HIGHMEM + address_markers[4].start_address = PKMAP_BASE; + address_markers[5].start_address = FIXADDR_START; +# else + address_markers[4].start_address = FIXADDR_START; +# endif +#endif + pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL, &ptdump_fops); if (!pe) -- cgit v1.2.1 From 00d1c5e05736f947687be27706bda01cec104e57 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: add gbpages switches These new controls toggle experimental support for a new CPU feature, the straightforward extension of largepages from the pmd level to the pud level, which allows 1GB (kernel) TLBs instead of 2MB TLBs. Turn it off by default, as this code has not been tested well enough yet. Use the CONFIG_DIRECT_GBPAGES=y .config option or gbpages on the boot line can be used to enable it. If enabled in the .config then nogbpages boot option disables it. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- Documentation/x86_64/boot-options.txt | 5 +++++ arch/x86/Kconfig.debug | 12 ++++++++++++ arch/x86/mm/init_64.c | 20 ++++++++++++++++++++ include/asm-x86/pgtable_64.h | 2 ++ 4 files changed, 39 insertions(+) diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt index 34abae4e9442..b0c7b6c4abda 100644 --- a/Documentation/x86_64/boot-options.txt +++ b/Documentation/x86_64/boot-options.txt @@ -307,3 +307,8 @@ Debugging stuck (default) Miscellaneous + + nogbpages + Do not use GB pages for kernel direct mappings. + gbpages + Use GB pages for kernel direct mappings. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 7ce8e7025661..f4413c04e687 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -76,6 +76,18 @@ config DEBUG_RODATA data. This is recommended so that we can catch kernel bugs sooner. If in doubt, say "Y". +config DIRECT_GBPAGES + bool "Enable gbpages-mapped kernel pagetables" + depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64 + help + Enable gigabyte pages support (if the CPU supports it). This can + improve the kernel's performance a tiny bit by reducing TLB + pressure. + + This is experimental code. + + If in doubt, say "N". 
+ config DEBUG_RODATA_TEST bool "Testcase for the DEBUG_RODATA feature" depends on DEBUG_RODATA diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a02a14f0f324..6e7d5a42a09a 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -54,6 +54,26 @@ static unsigned long dma_reserve __initdata; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +int direct_gbpages __meminitdata +#ifdef CONFIG_DIRECT_GBPAGES + = 1 +#endif +; + +static int __init parse_direct_gbpages_off(char *arg) +{ + direct_gbpages = 0; + return 0; +} +early_param("nogbpages", parse_direct_gbpages_off); + +static int __init parse_direct_gbpages_on(char *arg) +{ + direct_gbpages = 1; + return 0; +} +early_param("gbpages", parse_direct_gbpages_on); + /* * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the * physical space so we can cache the place of the first one and move diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 01d2359e7a34..6ef09914acbe 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -239,6 +239,8 @@ static inline int pud_large(pud_t pte) #define update_mmu_cache(vma,address,pte) do { } while (0) +extern int direct_gbpages; + /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) -- cgit v1.2.1 From ef9257668e3199f9566dc4a31f5292838bd99b49 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: do kernel direct mapping at boot using GB pages The AMD Fam10h CPUs support new Gigabyte page table entry for mapping 1GB at a time. Use this for the kernel direct mapping. Only done for 64bit because i386 does not support GB page tables. This only applies to the data portion of the direct mapping; the kernel text mapping stays with 2MB pages because the AMD Fam10h microarchitecture does not support GB ITLBs and AMD recommends against using GB mappings for code. 
Can be disabled with disable_gbpages on the kernel command line [ tglx@linutronix.de: simplify enable code ] [ Yinghai Lu : boot fix on 256 GB RAM ] Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/init_64.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6e7d5a42a09a..ae66d8d4c58d 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -370,7 +370,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) } if (pud_val(*pud)) { - phys_pmd_update(pud, addr, end); + if (!pud_large(*pud)) + phys_pmd_update(pud, addr, end); + continue; + } + + if (direct_gbpages) { + set_pte((pte_t *)pud, + pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); continue; } @@ -391,9 +398,11 @@ static void __init find_early_table_space(unsigned long end) unsigned long puds, pmds, tables, start; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; - pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; - tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) + - round_up(pmds * sizeof(pmd_t), PAGE_SIZE); + tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); + if (!direct_gbpages) { + pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; + tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); + } /* * RED-PEN putting page tables only on node 0 could @@ -413,6 +422,14 @@ static void __init find_early_table_space(unsigned long end) (table_start << PAGE_SHIFT) + tables); } +static void __init init_gbpages(void) +{ + if (direct_gbpages && cpu_has_gbpages) + printk(KERN_INFO "Using GB pages for direct mapping\n"); + else + direct_gbpages = 0; +} + /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from @@ -431,8 +448,10 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) * memory mapped. Unfortunately this is done currently before the * nodes are discovered. */ - if (!after_bootmem) + if (!after_bootmem) { + init_gbpages(); find_early_table_space(end); + } start = (unsigned long)__va(start); end = (unsigned long)__va(end); -- cgit v1.2.1 From 48c508b364324c35018284328b5b92c51d2b30e0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 17 Apr 2008 17:40:45 +0200 Subject: x86: clean up find_e820_area(), 64-bit Change size to unsigned long, becase caller and user all used unsigned long. Also make bad_addr take an alignment parameter. 
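One detail worth spelling out: moving the alignment into bad_addr() does more than
tidy the code. For a power-of-two align, round_up() is equivalent to the mask
arithmetic sketched below (an illustrative stand-in, not the kernel's own macro
definition), and it is now applied every time the search address is bumped past a
reservation:

/* Illustrative stand-in for the kernel's round_up(); align must be a
 * power of two, and x/align are taken as unsigned long. */
#define ROUND_UP(x, align)      (((x) + (align) - 1) & ~((align) - 1))

/*
 * Example with 4 KiB alignment: a reservation ending at 0x1234 now
 * advances the search to ROUND_UP(0x1234, 0x1000) == 0x2000, and the
 * "goto again" in bad_addr() re-checks 0x2000 against the whole
 * reservation list.  The old flow aligned only once, after the
 * bad_addr() loop had finished, so the rounded-up address was never
 * re-validated against the reservations.
 */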
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/e820_64.c | 22 ++++++++++++---------- include/asm-x86/e820_64.h | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 9be697126013..a8694a35352b 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -95,7 +95,8 @@ void __init early_res_to_bootmem(void) } /* Check for already reserved areas */ -static inline int bad_addr(unsigned long *addrp, unsigned long size) +static inline int +bad_addr(unsigned long *addrp, unsigned long size, unsigned long align) { int i; unsigned long addr = *addrp, last; @@ -105,7 +106,7 @@ again: for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { struct early_res *r = &early_res[i]; if (last >= r->start && addr < r->end) { - *addrp = addr = r->end; + *addrp = addr = round_up(r->end, align); changed = 1; goto again; } @@ -174,26 +175,27 @@ int __init e820_all_mapped(unsigned long start, unsigned long end, * Find a free area with specified alignment in a specific range. */ unsigned long __init find_e820_area(unsigned long start, unsigned long end, - unsigned size, unsigned long align) + unsigned long size, unsigned long align) { int i; - unsigned long mask = ~(align - 1); for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; - unsigned long addr = ei->addr, last; + unsigned long addr, last; + unsigned long ei_last; if (ei->type != E820_RAM) continue; + addr = round_up(ei->addr, align); + ei_last = ei->addr + ei->size; if (addr < start) - addr = start; - if (addr > ei->addr + ei->size) + addr = round_up(start, align); + if (addr > ei_last) continue; - while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) + while (bad_addr(&addr, size, align) && addr+size <= ei_last) ; - addr = (addr + align - 1) & mask; last = addr + size; - if (last > ei->addr + ei->size) + if (last > ei_last) continue; if (last > end) continue; diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 22ede73ae724..9e06c6eb4e27 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -15,7 +15,7 @@ #ifndef __ASSEMBLY__ extern unsigned long find_e820_area(unsigned long start, unsigned long end, - unsigned size, unsigned long align); + unsigned long size, unsigned long align); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, -- cgit v1.2.1 From b62576a2f53ad7edf604fedba0da1d4329749b7d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 9 Feb 2008 16:16:58 +0100 Subject: x86: use year 2000 offset for cmos clock We know it is already after 2000. Use the year 2000 offset for both 32 and 64 bit, which removes ifdefs and the 1970 magic. [ tglx@linutronix.de: remove 1970 magic, replace bogus commit message ] Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/rtc.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index eb9b1a198f5e..d2569513ad4c 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -9,7 +9,6 @@ #include #ifdef CONFIG_X86_32 -# define CMOS_YEARS_OFFS 1900 /* * This is a special lock that is owned by the CPU and holds the index * register we are working with. 
It is required for NMI access to the @@ -17,14 +16,11 @@ */ volatile unsigned long cmos_lock = 0; EXPORT_SYMBOL(cmos_lock); -#else -/* - * x86-64 systems only exists since 2002. - * This will work up to Dec 31, 2100 - */ -# define CMOS_YEARS_OFFS 2000 #endif +/* For two digit years assume time is always after that */ +#define CMOS_YEARS_OFFS 2000 + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); @@ -136,11 +132,8 @@ unsigned long mach_get_cmos_time(void) BCD_TO_BIN(century); year += century * 100; printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); - } else { + } else year += CMOS_YEARS_OFFS; - if (year < 1970) - year += 100; - } return mktime(year, mon, day, hour, min, sec); } -- cgit v1.2.1 From 068c9222d0206e8a6a905efeb9f4fe8dde8b5ff5 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 9 Feb 2008 16:16:59 +0100 Subject: x86: add warning when RTC clock reports binary We assume that the RTC clock is BCD, so print a warning if it claims to be binary. [ tglx@linutronix.de: changed to WARN_ON - we want to know that! If no one reports it we can remove the complete if (RTC_ALWAYS_BCD) magic, which has RTC_ALWAYS_BCD defined to 1 since Linux 1.0 ... ] Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/rtc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index d2569513ad4c..d4d8277e890e 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -94,7 +94,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) unsigned long mach_get_cmos_time(void) { - unsigned int year, mon, day, hour, min, sec, century = 0; + unsigned int status, year, mon, day, hour, min, sec, century = 0; /* * If UIP is clear, then we have >= 244 microseconds before @@ -119,7 +119,10 @@ unsigned long mach_get_cmos_time(void) century = CMOS_READ(acpi_gbl_FADT.century); #endif - if (RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)) { + status = CMOS_READ(RTC_CONTROL); + WARN_ON_ONCE((RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); + + if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { BCD_TO_BIN(sec); BCD_TO_BIN(min); BCD_TO_BIN(hour); -- cgit v1.2.1 From 45de70791165ce7eac5232ed5a7c31152567f4da Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 9 Feb 2008 16:17:01 +0100 Subject: x86: enable ACPI extended century handling for 32bit The extended century readout does not solve the year 2038 problem on 32bit! v2: Fix compilation on !ACPI, pointed out by tglx Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/rtc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index d4d8277e890e..91492190ac72 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -112,15 +112,14 @@ unsigned long mach_get_cmos_time(void) mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); -#if defined(CONFIG_ACPI) && defined(CONFIG_X86_64) - /* CHECKME: Is this really 64bit only ??? 
*/ +#ifdef CONFIG_ACPI if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && acpi_gbl_FADT.century) century = CMOS_READ(acpi_gbl_FADT.century); #endif status = CMOS_READ(RTC_CONTROL); - WARN_ON_ONCE((RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); + WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { BCD_TO_BIN(sec); -- cgit v1.2.1 From 92bc2056855b3250bf6fd5849f05f88d85839efa Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:56 -0800 Subject: x86: change most X86_32 pt_regs members to unsigned long Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_32.c | 5 ++--- arch/x86/kernel/signal_32.c | 4 ++-- include/asm-x86/ptrace.h | 28 ++++++++++++++-------------- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 43930e73f657..2cd89b8a7050 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -332,7 +332,7 @@ void __show_registers(struct pt_regs *regs, int all) init_utsname()->version); printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", - 0xffff & regs->cs, regs->ip, regs->flags, + (u16)regs->cs, regs->ip, regs->flags, smp_processor_id()); print_symbol("EIP is at %s\n", regs->ip); @@ -341,8 +341,7 @@ void __show_registers(struct pt_regs *regs, int all) printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", - regs->ds & 0xffff, regs->es & 0xffff, - regs->fs & 0xffff, gs, ss); + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); if (!all) return; diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 011c62fa563c..34fc0416b135 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -393,8 +393,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) sig; - regs->dx = (unsigned long) 0; - regs->cx = (unsigned long) 0; + regs->dx = 0; + regs->cx = 0; regs->ds = __USER_DS; regs->es = __USER_DS; diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index d9e04b46a440..708337a36727 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -36,23 +36,23 @@ struct pt_regs { #else /* __KERNEL__ */ struct pt_regs { - long bx; - long cx; - long dx; - long si; - long di; - long bp; + unsigned long bx; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + unsigned long bp; long ax; - int ds; - int es; - int fs; + unsigned long ds; + unsigned long es; + unsigned long fs; /* int gs; */ long orig_ax; - long ip; - int cs; - long flags; - long sp; - int ss; + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long ss; }; #include -- cgit v1.2.1 From 9902a702c76f904be0057f8647dda9d6f89d4847 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:57 -0800 Subject: x86: make X86_32 pt_regs members unsigned long Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_32.c | 4 ++-- include/asm-x86/ptrace.h | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 34fc0416b135..b1e97e80a57c 100644 --- a/arch/x86/kernel/signal_32.c 
+++ b/arch/x86/kernel/signal_32.c @@ -527,7 +527,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, int ret; /* Are we from a system call? */ - if (regs->orig_ax >= 0) { + if ((long)regs->orig_ax >= 0) { /* If so, check system call restarting.. */ switch (regs->ax) { case -ERESTART_RESTARTBLOCK: @@ -625,7 +625,7 @@ static void do_signal(struct pt_regs *regs) } /* Did we come from a system call? */ - if (regs->orig_ax >= 0) { + if ((long)regs->orig_ax >= 0) { /* Restart the system call - no handlers present */ switch (regs->ax) { case -ERESTARTNOHAND: diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 708337a36727..bc442461ac64 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -42,12 +42,12 @@ struct pt_regs { unsigned long si; unsigned long di; unsigned long bp; - long ax; + unsigned long ax; unsigned long ds; unsigned long es; unsigned long fs; /* int gs; */ - long orig_ax; + unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; @@ -145,7 +145,10 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro void signal_fault(struct pt_regs *regs, void __user *frame, char *where); #endif -#define regs_return_value(regs) ((regs)->ax) +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->ax; +} /* * user_mode_vm(regs) determines whether a register set came from user mode. -- cgit v1.2.1 From c3e6ff87a3ad9124a67e149e4f9c080626a0d83e Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:57 -0800 Subject: x86: regparm(3) is mandatory, no need to annotate Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/ptrace.c | 1 - arch/x86/kernel/signal_32.c | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index eb92ccbb3502..559c1b027417 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1456,7 +1456,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) /* notification of system call entry/exit * - triggered by current->work.syscall_trace */ -__attribute__((regparm(3))) int do_syscall_trace(struct pt_regs *regs, int entryexit) { int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU); diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index b1e97e80a57c..a393e3711e08 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -654,7 +654,6 @@ static void do_signal(struct pt_regs *regs) * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ -__attribute__((regparm(3))) void do_notify_resume(struct pt_regs *regs, void *_unused, __u32 thread_info_flags) { -- cgit v1.2.1 From ac66f3fd89ee20b73b3374e6343c5e36e3e3c51a Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:58 -0800 Subject: x86: reduce trivial style differences in signal_32|64.c Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_32.c | 64 +++++++++++++++++++++------------------------ arch/x86/kernel/signal_64.c | 47 +++++++++++++++++---------------- 2 files changed, 55 insertions(+), 56 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index a393e3711e08..182269b752da 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -131,14 +131,8 @@ restore_sigcontext(struct 
pt_regs *regs, struct sigcontext __user *sc, int *peax COPY_SEG(fs); COPY_SEG(es); COPY_SEG(ds); - COPY(di); - COPY(si); - COPY(bp); - COPY(sp); - COPY(bx); - COPY(dx); - COPY(cx); - COPY(ip); + COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); + COPY(dx); COPY(cx); COPY(ip); COPY_SEG_STRICT(cs); COPY_SEG_STRICT(ss); @@ -412,7 +406,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, ptrace_notify(SIGTRAP); #if DEBUG_SIG - printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif @@ -522,7 +516,7 @@ give_sigsegv: static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs * regs) + sigset_t *oldset, struct pt_regs *regs) { int ret; @@ -530,20 +524,21 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, if ((long)regs->orig_ax >= 0) { /* If so, check system call restarting.. */ switch (regs->ax) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->ax = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { regs->ax = -EINTR; break; - - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->ax = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - regs->ax = regs->orig_ax; - regs->ip -= 2; + } + /* fallthrough */ + case -ERESTARTNOINTR: + regs->ax = regs->orig_ax; + regs->ip -= 2; + break; } } @@ -580,18 +575,17 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, */ static void do_signal(struct pt_regs *regs) { + struct k_sigaction ka; siginfo_t info; int signr; - struct k_sigaction ka; sigset_t *oldset; /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. vm86 regs switched out by assembly code - * before reaching here, so testing against kernel - * CS suffices. + * We want the common case to go fast, which is why we may in certain + * cases get here from kernel mode. Just return without doing anything + * if so. + * X86_32: vm86 regs switched out by assembly code before reaching + * here, so testing against kernel CS suffices. */ if (!user_mode(regs)) return; @@ -608,7 +602,7 @@ static void do_signal(struct pt_regs *regs) * have been cleared if the watchpoint triggered * inside the kernel. */ - if (unlikely(current->thread.debugreg7)) + if (current->thread.debugreg7) set_debugreg(current->thread.debugreg7, 7); /* Whee! Actually deliver the signal. */ @@ -642,8 +636,10 @@ static void do_signal(struct pt_regs *regs) } } - /* if there's no signal to deliver, we just put the saved sigmask - * back */ + /* + * If there's no signal to deliver, we just put the saved sigmask + * back. + */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); @@ -654,12 +650,12 @@ static void do_signal(struct pt_regs *regs) * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ -void do_notify_resume(struct pt_regs *regs, void *_unused, +void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { /* Pending single-step? 
*/ if (thread_info_flags & _TIF_SINGLESTEP) { - regs->flags |= TF_MASK; + regs->flags |= X86_EFLAGS_TF; clear_thread_flag(TIF_SINGLESTEP); } diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 1c83e5124c65..863cebe8e60a 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -345,7 +345,7 @@ static long current_syscall_ret(struct pt_regs *regs) static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) + sigset_t *oldset, struct pt_regs *regs) { int ret; @@ -359,21 +359,21 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, if (current_syscall(regs) >= 0) { /* If so, check system call restarting.. */ switch (current_syscall_ret(regs)) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->ax = -EINTR; - break; + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->ax = -EINTR; + break; - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->ax = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - regs->ax = regs->orig_ax; - regs->ip -= 2; + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->ax = -EINTR; break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + regs->ax = regs->orig_ax; + regs->ip -= 2; + break; } } @@ -420,10 +420,11 @@ static void do_signal(struct pt_regs *regs) sigset_t *oldset; /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything + * We want the common case to go fast, which is why we may in certain + * cases get here from kernel mode. Just return without doing anything * if so. + * X86_32: vm86 regs switched out by assembly code before reaching + * here, so testing against kernel CS suffices. */ if (!user_mode(regs)) return; @@ -473,16 +474,18 @@ static void do_signal(struct pt_regs *regs) } } - /* if there's no signal to deliver, we just put the saved sigmask - back. */ + /* + * If there's no signal to deliver, we just put the saved sigmask + * back. 
+ */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); } } -void -do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) +void do_notify_resume(struct pt_regs *regs, void *unused, + __u32 thread_info_flags) { #ifdef DEBUG_SIG printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n", @@ -502,7 +505,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) #endif /* CONFIG_X86_MCE */ /* deal with pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK)) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); if (thread_info_flags & _TIF_HRTICK_RESCHED) -- cgit v1.2.1 From 1a1768039c8fdd48d69a6bc3b7f56943b2b20567 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:09:59 -0800 Subject: x86: Use FIX_EFLAGS define in X86_64 [ tglx@linutronix.de: simplified ] Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_32.c | 16 +++++++++++----- arch/x86/kernel/signal_64.c | 14 +++++++++++++- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 182269b752da..9eb23fb66b1e 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -30,6 +30,17 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ + X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ + X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ + X86_EFLAGS_CF) + +#ifdef CONFIG_X86_32 +# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) +#else +# define FIX_EFLAGS __FIX_EFLAGS +#endif + /* * Atomically swap in the new signal mask, and wait for a signal. 
*/ @@ -122,11 +133,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax err |= __get_user(tmp, &sc->seg); \ loadsegment(seg,tmp); } -#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ - X86_EFLAGS_OF | X86_EFLAGS_DF | \ - X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ - X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) - GET_SEG(gs); COPY_SEG(fs); COPY_SEG(es); diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 863cebe8e60a..b7d7a6d5c26b 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -30,6 +31,17 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ + X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ + X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ + X86_EFLAGS_CF) + +#ifdef CONFIG_X86_32 +# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) +#else +# define FIX_EFLAGS __FIX_EFLAGS +#endif + int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs); int ia32_setup_frame(int sig, struct k_sigaction *ka, @@ -87,7 +99,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned { unsigned int tmpflags; err |= __get_user(tmpflags, &sc->flags); - regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5); + regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ } -- cgit v1.2.1 From 2d19c4580682511be1eadf47cdee22d5eb002f94 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:10:00 -0800 Subject: x86: use sizeof(long) to unify signal_32|64.c Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_32.c | 5 +++-- arch/x86/kernel/signal_64.c | 8 +++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 9eb23fb66b1e..47c85e6b14bb 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -214,11 +214,12 @@ badframe: asmlinkage int sys_rt_sigreturn(unsigned long __unused) { - struct pt_regs *regs = (struct pt_regs *) &__unused; - struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4); + struct pt_regs *regs = (struct pt_regs *)&__unused; + struct rt_sigframe __user *frame; sigset_t set; int ax; + frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index b7d7a6d5c26b..1045a07eeaec 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -133,13 +133,11 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) sigset_t set; unsigned long ax; - frame = (struct rt_sigframe __user *)(regs->sp - 8); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) { + frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; - } - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - } sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(¤t->sighand->siglock); -- cgit v1.2.1 From 123a63476cafcede1c70529f62a5bfb96a0efc1b Mon Sep 17 
00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:10:00 -0800 Subject: x86: move struct definitions to unifed sigframe.h [ tglx@linutronix.de: cleanup the other structs as well ] Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/asm-offsets_32.c | 2 +- arch/x86/kernel/sigframe.h | 27 +++++++++++++++++++++++++++ arch/x86/kernel/sigframe_32.h | 21 --------------------- arch/x86/kernel/signal_32.c | 2 +- arch/x86/kernel/signal_64.c | 9 +-------- 5 files changed, 30 insertions(+), 31 deletions(-) create mode 100644 arch/x86/kernel/sigframe.h delete mode 100644 arch/x86/kernel/sigframe_32.h diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 8ea040124f7d..670c3c311289 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -10,7 +10,7 @@ #include #include #include -#include "sigframe_32.h" +#include "sigframe.h" #include #include #include diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h new file mode 100644 index 000000000000..72bbb519d2dc --- /dev/null +++ b/arch/x86/kernel/sigframe.h @@ -0,0 +1,27 @@ +#ifdef CONFIG_X86_32 +struct sigframe { + char __user *pretcode; + int sig; + struct sigcontext sc; + struct _fpstate fpstate; + unsigned long extramask[_NSIG_WORDS-1]; + char retcode[8]; +}; + +struct rt_sigframe { + char __user *pretcode; + int sig; + struct siginfo __user *pinfo; + void __user *puc; + struct siginfo info; + struct ucontext uc; + struct _fpstate fpstate; + char retcode[8]; +}; +#else +struct rt_sigframe { + char __user *pretcode; + struct ucontext uc; + struct siginfo info; +}; +#endif diff --git a/arch/x86/kernel/sigframe_32.h b/arch/x86/kernel/sigframe_32.h deleted file mode 100644 index 0b2221711dad..000000000000 --- a/arch/x86/kernel/sigframe_32.h +++ /dev/null @@ -1,21 +0,0 @@ -struct sigframe -{ - char __user *pretcode; - int sig; - struct sigcontext sc; - struct _fpstate fpstate; - unsigned long extramask[_NSIG_WORDS-1]; - char retcode[8]; -}; - -struct rt_sigframe -{ - char __user *pretcode; - int sig; - struct siginfo __user *pinfo; - void __user *puc; - struct siginfo info; - struct ucontext uc; - struct _fpstate fpstate; - char retcode[8]; -}; diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 47c85e6b14bb..5447fa5ec52c 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -24,7 +24,7 @@ #include #include #include -#include "sigframe_32.h" +#include "sigframe.h" #define DEBUG_SIG 0 diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 1045a07eeaec..8bb1013eb62b 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -26,6 +26,7 @@ #include #include #include +#include "sigframe.h" /* #define DEBUG_SIG 1 */ @@ -58,14 +59,6 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, /* * Do a signal return; undo the signal stack. 
*/ - -struct rt_sigframe -{ - char __user *pretcode; - struct ucontext uc; - struct siginfo info; -}; - static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax) { -- cgit v1.2.1 From 866bc13fc4c625186dd01429c68c5cf708f1cfd5 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:10:02 -0800 Subject: x86: Unify argument names in signal_32|64.c Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_32.c | 12 ++++++------ arch/x86/kernel/signal_64.c | 5 +++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 5447fa5ec52c..add9c6e9c44d 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -107,9 +107,9 @@ sys_sigaltstack(unsigned long bx) /* * Do a signal return; undo the signal stack. */ - static int -restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax) +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, + unsigned long *pax) { unsigned int err = 0; @@ -165,19 +165,19 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax } } - err |= __get_user(*peax, &sc->ax); + err |= __get_user(*pax, &sc->ax); return err; badframe: return 1; } -asmlinkage int sys_sigreturn(unsigned long __unused) +asmlinkage unsigned long sys_sigreturn(unsigned long __unused) { struct pt_regs *regs = (struct pt_regs *) &__unused; struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8); sigset_t set; - int ax; + unsigned long ax; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -216,8 +216,8 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) { struct pt_regs *regs = (struct pt_regs *)&__unused; struct rt_sigframe __user *frame; + unsigned long ax; sigset_t set; - int ax; frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 8bb1013eb62b..f3247d71edbc 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -60,7 +60,8 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, * Do a signal return; undo the signal stack. 
*/ static int -restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax) +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, + unsigned long *pax) { unsigned int err = 0; @@ -113,7 +114,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned } } - err |= __get_user(*prax, &sc->ax); + err |= __get_user(*pax, &sc->ax); return err; badframe: -- cgit v1.2.1 From e0bf0f75bdc441abb05365abc56ee96ba44ca073 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 8 Feb 2008 12:10:03 -0800 Subject: x86: define DEBUG_SIG in signal_64.c Signed-off-by: Harvey Harrison Cc: Roland McGrath Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/signal_64.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index f3247d71edbc..043294582f41 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -28,7 +28,7 @@ #include #include "sigframe.h" -/* #define DEBUG_SIG 1 */ +#define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -142,7 +142,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; -#ifdef DEBUG_SIG +#if DEBUG_SIG printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax); #endif @@ -274,7 +274,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (err) goto give_sigsegv; -#ifdef DEBUG_SIG +#if DEBUG_SIG printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); #endif @@ -302,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); -#ifdef DEBUG_SIG +#if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif @@ -353,7 +353,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, { int ret; -#ifdef DEBUG_SIG +#if DEBUG_SIG printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n", current->pid, sig, regs->ip, regs->sp, regs); @@ -491,7 +491,7 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { -#ifdef DEBUG_SIG +#if DEBUG_SIG printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n", thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current)); #endif -- cgit v1.2.1 From 099e1377269a47ed30a00ee131001988e5bcaa9c Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 13 Feb 2008 20:54:58 +0000 Subject: x86: use ELF format in compressed images. Signed-off-by: Ian Campbell Cc: Ian Campbell Cc: Jeremy Fitzhardinge Cc: virtualization@lists.linux-foundation.org Cc: H. 
Peter Anvin Cc: Jeremy Fitzhardinge Cc: virtualization@lists.linux-foundation.org Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- Documentation/i386/boot.txt | 18 +++++++++++++ arch/x86/boot/Makefile | 14 ++++++++++ arch/x86/boot/compressed/Makefile | 2 +- arch/x86/boot/compressed/misc.c | 56 +++++++++++++++++++++++++++++++++++++++ arch/x86/boot/header.S | 6 +++++ 5 files changed, 95 insertions(+), 1 deletion(-) diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt index fc49b79bc1ab..b5f5ba1ea668 100644 --- a/Documentation/i386/boot.txt +++ b/Documentation/i386/boot.txt @@ -170,6 +170,8 @@ Offset Proto Name Meaning 0238/4 2.06+ cmdline_size Maximum size of the kernel command line 023C/4 2.07+ hardware_subarch Hardware subarchitecture 0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data +0248/4 2.08+ compressed_payload_offset +024C/4 2.08+ compressed_payload_length (1) For backwards compatibility, if the setup_sects field contains 0, the real value is 4. @@ -512,6 +514,22 @@ Protocol: 2.07+ A pointer to data that is specific to hardware subarch +Field name: compressed_payload_offset +Type: read +Offset/size: 0x248/4 +Protocol: 2.08+ + + If non-zero then this field contains the offset from the end of the + real-mode code to the compressed payload. The compression format + should be determined using the standard magic number, currently only + gzip is used. + +Field name: compressed_payload_length +Type: read +Offset/size: 0x24c/4 +Protocol: 2.08+ + + The length of the compressed payload. **** THE KERNEL COMMAND LINE diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index f88458e83ef0..9695affeb584 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -94,6 +94,20 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) +sed-offsets := -e 's/^00*/0/' \ + -e 's/^\([0-9a-fA-F]*\) . 
\(input_data\|input_data_end\)$$/\#define \2 0x\1/p' + +quiet_cmd_offsets = OFFSETS $@ + cmd_offsets = $(NM) $< | sed -n $(sed-offsets) > $@ + +$(obj)/offsets.h: $(obj)/compressed/vmlinux FORCE + $(call if_changed,offsets) + +targets += offsets.h + +AFLAGS_header.o += -I$(obj) +$(obj)/header.o: $(obj)/offsets.h + LDFLAGS_setup.elf := -T $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index d2b9f3bb87c0..92fdd35bd93e 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -22,7 +22,7 @@ $(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $ $(call if_changed,ld) @: -OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S +OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 8182e32c1b42..69aec2f4155d 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -15,6 +15,10 @@ * we just keep it from happening */ #undef CONFIG_PARAVIRT +#ifdef CONFIG_X86_32 +#define _ASM_DESC_H_ 1 +#endif + #ifdef CONFIG_X86_64 #define _LINUX_STRING_H_ 1 #define __LINUX_BITMAP_H 1 @@ -22,6 +26,7 @@ #include #include +#include #include #include #include @@ -365,6 +370,56 @@ static void error(char *x) asm("hlt"); } +static void parse_elf(void *output) +{ +#ifdef CONFIG_X86_64 + Elf64_Ehdr ehdr; + Elf64_Phdr *phdrs, *phdr; +#else + Elf32_Ehdr ehdr; + Elf32_Phdr *phdrs, *phdr; +#endif + void *dest; + int i; + + memcpy(&ehdr, output, sizeof(ehdr)); + if(ehdr.e_ident[EI_MAG0] != ELFMAG0 || + ehdr.e_ident[EI_MAG1] != ELFMAG1 || + ehdr.e_ident[EI_MAG2] != ELFMAG2 || + ehdr.e_ident[EI_MAG3] != ELFMAG3) + { + error("Kernel is not a valid ELF file"); + return; + } + + putstr("Parsing ELF... "); + + phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); + if (!phdrs) + error("Failed to allocate space for phdrs"); + + memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); + + for (i=0; i<ehdr.e_phnum; i++) { + phdr = &phdrs[i]; + + switch (phdr->p_type) { + case PT_LOAD: +#ifdef CONFIG_RELOCATABLE + dest = output; + dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); +#else + dest = (void*)(phdr->p_paddr); +#endif + memcpy(dest, + output + phdr->p_offset, + phdr->p_filesz); + break; + default: /* Ignore other PT_* */ break; + } + } +} + asmlinkage void decompress_kernel(void *rmode, memptr heap, uch *input_data, unsigned long input_len, uch *output) @@ -408,6 +463,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, makecrc(); putstr("\nDecompressing Linux... 
"); gunzip(); + parse_elf(output); putstr("done.\nBooting the kernel.\n"); return; } diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 64ad9016585a..8471658d5534 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -22,6 +22,7 @@ #include #include #include "boot.h" +#include "offsets.h" SETUPSECTS = 4 /* default nr of setup-sectors */ BOOTSEG = 0x07C0 /* original address of boot-sector */ @@ -223,6 +224,11 @@ hardware_subarch: .long 0 # subarchitecture, added with 2.07 hardware_subarch_data: .quad 0 +compressed_payload_offset: + .long input_data +compressed_payload_length: + .long input_data_end-input_data + # End of setup header ##################################################### .section ".inittext", "ax" -- cgit v1.2.1 From 7d6e737c8d2698b63ad10fd75cc6793380395d0e Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Sun, 17 Feb 2008 20:06:35 +0100 Subject: x86: add a crc32 checksum to the kernel image. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- Documentation/i386/boot.txt | 8 +++++ arch/x86/boot/tools/build.c | 88 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt index b5f5ba1ea668..05c24dfd7ecf 100644 --- a/Documentation/i386/boot.txt +++ b/Documentation/i386/boot.txt @@ -531,6 +531,14 @@ Protocol: 2.08+ The length of the compressed payload. +**** THE IMAGE CHECKSUM + +From boot protocol version 2.08 onwards the CRC-32 is calculated over +the entire file using the characteristic polynomial 0x04C11DB7 and an +initial remainder of 0xffffffff. The checksum is appended to the +file; therefore the CRC of the file up to the limit specified in the +syssize field of the header is always 0. 
+ **** THE KERNEL COMMAND LINE The kernel command line has become an important way for the boot diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index b4248740ff0d..44dc1923c0e3 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -50,6 +50,75 @@ typedef unsigned long u32; u8 buf[SETUP_SECT_MAX*512]; int is_big_kernel; +/*----------------------------------------------------------------------*/ + +static const u32 crctab32[] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, + 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, + 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, + 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, + 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, + 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, + 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, + 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, + 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, + 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, + 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, + 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, + 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, + 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, + 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, + 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, + 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, + 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, + 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, + 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, + 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, + 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, + 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, + 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, + 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, + 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, + 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, + 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, + 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, + 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, + 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, + 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, + 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, + 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, + 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, + 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, + 0xcdd70693, 0x54de5729, 0x23d967bf, 
0xb3667a2e, 0xc4614ab8, + 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, + 0x2d02ef8d +}; + +static u32 partial_crc32_one(u8 c, u32 crc) +{ + return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8); +} + +static u32 partial_crc32(const u8 *s, int len, u32 crc) +{ + while (len--) + crc = partial_crc32_one(*s++, crc); + return crc; +} + static void die(const char * str, ...) { va_list args; @@ -74,6 +143,7 @@ int main(int argc, char ** argv) FILE *file; int fd; void *kernel; + u32 crc = 0xffffffffUL; if (argc > 2 && !strcmp(argv[1], "-b")) { @@ -144,7 +214,8 @@ int main(int argc, char ** argv) kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); if (kernel == MAP_FAILED) die("Unable to mmap '%s': %m", argv[2]); - sys_size = (sz + 15) / 16; + /* Number of 16-byte paragraphs, including space for a 4-byte CRC */ + sys_size = (sz + 15 + 4) / 16; if (!is_big_kernel && sys_size > DEF_SYSSIZE) die("System is too big. Try using bzImage or modules."); @@ -155,12 +226,27 @@ int main(int argc, char ** argv) buf[0x1f6] = sys_size >> 16; buf[0x1f7] = sys_size >> 24; + crc = partial_crc32(buf, i, crc); if (fwrite(buf, 1, i, stdout) != i) die("Writing setup failed"); /* Copy the kernel code */ + crc = partial_crc32(kernel, sz, crc); if (fwrite(kernel, 1, sz, stdout) != sz) die("Writing kernel failed"); + + /* Add padding leaving 4 bytes for the checksum */ + while (sz++ < (sys_size*16) - 4) { + crc = partial_crc32_one('\0', crc); + if (fwrite("\0", 1, 1, stdout) != 1) + die("Writing padding failed"); + } + + /* Write the CRC */ + fprintf(stderr, "CRC %lx\n", crc); + if (fwrite(&crc, 1, 4, stdout) != 4) + die("Writing CRC failed"); + close(fd); /* Everything is OK */ -- cgit v1.2.1 From 52b387197beb5c89f734f057553fdf7d417b448c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 17 Feb 2008 20:06:36 +0100 Subject: x86: bump image header to version 2.08. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/boot/header.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8471658d5534..40c91bb483e1 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -120,7 +120,7 @@ _start: # Part 2 of the header, from the old setup.S .ascii "HdrS" # header signature - .word 0x0207 # header version number (>= 0x0105) + .word 0x0208 # header version number (>= 0x0105) # or else old loadlin-1.5 will fail) .globl realmode_swtch realmode_swtch: .word 0, 0 # default_switch, SETUPSEG -- cgit v1.2.1 From 28d23128814dca9b90f04739b12f967f4248c584 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 7 Feb 2008 21:03:04 -0200 Subject: x86: reducing debuginfo size by removing unneeded includes I found it strange that the struct sk_buff definition was found inside the DWARF debugging sections in the generated object, so I verified and found that there is no need for the files that bring struct sk_buff definition into this file and verified also that sk_buff is not brought in indirectly too, thru other headers. I went on and removed many other unneeded includes and the end result is: [acme@doppio net-2.6]$ l /tmp/sys_ia32.o.before /tmp/sys_ia32.o.after -rw-rw-r-- 1 acme acme 185240 2008-02-06 19:19 /tmp/sys_ia32.o.after -rw-rw-r-- 1 acme acme 248328 2008-02-06 19:00 /tmp/sys_ia32.o.before Almost 64KB only on this object file! 
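
[ editor's note: a quick brute-force check of the paragraph-rounding arithmetic in the build-tool patch above. The asserted invariants are my reading of the code, not claims from the patch: sys_size = (sz + 15 + 4) / 16 always reserves the trailing 4 bytes for the CRC and never adds a full 16-byte paragraph of padding. ]

/* pad_check.c - exhaustively checks the syssize/padding arithmetic */
#include <assert.h>

int main(void)
{
	unsigned long sz;

	for (sz = 0; sz < 1UL << 20; sz++) {
		unsigned long sys_size = (sz + 15 + 4) / 16;
		unsigned long data_end = sys_size * 16 - 4; /* bytes before CRC */

		assert(data_end >= sz);			/* kernel never truncated */
		assert(data_end - sz < 16);		/* under one paragraph of padding */
		assert((data_end + 4) % 16 == 0);	/* CRC ends on a paragraph */
	}
	return 0;
}
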
There were no other side effects from this change: [acme@doppio net-2.6]$ objcopy -j "text" /tmp/sys_ia32.o.before /tmp/text.before [acme@doppio net-2.6]$ objcopy -j "text" /tmp/sys_ia32.o.after /tmp/text.after [acme@doppio net-2.6]$ md5sum /tmp/text.before /tmp/text.after b7ac9b17942add68494e698e4f965d36 /tmp/text.before b7ac9b17942add68494e698e4f965d36 /tmp/text.after One of the complaints about using tools such as systemtap is that one has to install the huge kernel-debuginfo package: [acme@doppio net-2.6]$ rpm -q --qf "%{size}\n" kernel-rt-debuginfo 471737710 543867594 [acme@doppio net-2.6]$ Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/ia32/sys_ia32.c | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index abf71d26fc2a..44d931f08414 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -26,51 +26,27 @@ #include #include #include -#include #include #include -#include #include -#include -#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include #include -#include -#include -#include -#include #include #include #include #include -#include -#include #include #include #include #include #include #include -#include - -#include -#include #include +#include #define AA(__x) ((unsigned long)(__x)) -- cgit v1.2.1 From beafe91f1c2c49713221ca2616755e1f3d472084 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 16 Feb 2008 23:00:22 -0800 Subject: x86: get apic_id later in acpi_numa_processor_affinity_init we don't need get that so early. Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/srat_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 845001c617cc..04e06c8226e3 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -132,7 +132,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) int pxm, node; int apic_id; - apic_id = pa->apic_id; if (srat_disabled()) return; if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { @@ -148,6 +147,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) bad_srat(); return; } + + apic_id = pa->apic_id; apicid_to_node[apic_id] = node; acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", -- cgit v1.2.1 From 04adf11435a5187383c35017a94b55701984243b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 16 Feb 2008 23:02:03 -0800 Subject: x86: remove never used nodenumer in pda Signed-off-by: Yinghai Lu Cc: Andrew Morton Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/numa_64.c | 2 -- include/asm-x86/pda.h | 1 - 2 files changed, 3 deletions(-) diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 16b82ad34b96..18267a02e67a 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -548,8 +548,6 @@ void __cpuinit numa_set_node(int cpu, int node) { int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; - cpu_pda(cpu)->nodenumber = node; - if(cpu_to_node_map) cpu_to_node_map[cpu] = node; else if(per_cpu_offset(cpu)) diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index c0305bff0f19..d9dc209c24ad 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h @@ -22,7 +22,6 @@ struct x8664_pda { offset 40!!! 
*/ #endif char *irqstackptr; - unsigned int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ short mmu_state; -- cgit v1.2.1 From e319e76521767f7f64cd1fb6f58d4d36bc861a67 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 13 Feb 2008 16:19:36 +0100 Subject: x86: apic: extended interrupt LVT support for AMD Signed-off-by: Robert Richter Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/apic_32.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 35a568ea8400..6aa93db7faa3 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -620,6 +620,35 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; } +/* + * Setup extended LVT, AMD specific (K8, family 10h) + * + * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and + * MCE interrupts are supported. Thus MCE offset must be set to 0. + */ + +#define APIC_EILVT_LVTOFF_MCE 0 +#define APIC_EILVT_LVTOFF_IBS 1 + +static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) +{ + unsigned long reg = (lvt_off << 4) + APIC_EILVT0; + unsigned int v = (mask << 16) | (msg_type << 8) | vector; + apic_write(reg, v); +} + +u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) +{ + setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); + return APIC_EILVT_LVTOFF_MCE; +} + +u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) +{ + setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); + return APIC_EILVT_LVTOFF_IBS; +} + /* * Local APIC start and shutdown */ -- cgit v1.2.1 From 270883a8b98af5e6ed591b4762fb046e9f044dfb Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:01 -0200 Subject: x86: change vsmp compile dependency Change Makefile so vsmp_64.o object is dependent on PARAVIRT, rather than X86_VSMP Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4eb5ce841106..80e6695a12a3 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -60,7 +60,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o -obj-$(CONFIG_X86_VSMP) += vsmp_64.o +obj-$(CONFIG_PARAVIRT) += vsmp_64.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module_$(BITS).o obj-$(CONFIG_ACPI_SRAT) += srat_32.o -- cgit v1.2.1 From a2beab31b167bd8ba49bb84944e07ac096f2ab0a Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:02 -0200 Subject: x86: make vsmp_init void, instead of static int Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/vsmp_64.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index d971210a6d36..976691726de4 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -16,20 +16,20 @@ #include #include -static int __init vsmp_init(void) +static void __init vsmp_init(void) { void *address; unsigned int cap, ctl; if (!early_pci_allowed()) - 
return 0; + return; /* Check if we are running on a ScaleMP vSMP box */ if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != PCI_VENDOR_ID_SCALEMP) || (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) - return 0; + return; /* set vSMP magic bits to indicate vSMP capable kernel */ address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8); @@ -46,7 +46,7 @@ static int __init vsmp_init(void) } iounmap(address); - return 0; + return; } core_initcall(vsmp_init); -- cgit v1.2.1 From 2785c8d052278228cc3806233c09295088f83d42 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:03 -0200 Subject: x86: call vsmp_init explicitly It becomes to early for ioremap, so we use early_ioremap Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/setup_64.c | 4 ++++ arch/x86/kernel/vsmp_64.c | 11 +++++------ include/asm-x86/setup.h | 4 ++++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index f4f7ecfb898c..e67925674eae 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -345,6 +345,10 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled) efi_init(); +#ifdef CONFIG_PARAVIRT + vsmp_init(); +#endif + dmi_scan_machine(); io_delay_init(); diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 976691726de4..fdf9fba6ba9c 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -16,10 +16,10 @@ #include #include -static void __init vsmp_init(void) +void __init vsmp_init(void) { void *address; - unsigned int cap, ctl; + unsigned int cap, ctl, cfg; if (!early_pci_allowed()) return; @@ -32,7 +32,8 @@ static void __init vsmp_init(void) return; /* set vSMP magic bits to indicate vSMP capable kernel */ - address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8); + cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0); + address = early_ioremap(cfg, 8); cap = readl(address); ctl = readl(address + 4); printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n", @@ -45,8 +46,6 @@ static void __init vsmp_init(void) printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl); } - iounmap(address); + early_iounmap(address, 8); return; } - -core_initcall(vsmp_init); diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 071e054abd82..f745de211191 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h @@ -4,6 +4,10 @@ #define COMMAND_LINE_SIZE 2048 #ifndef __ASSEMBLY__ + +/* Interrupt control for vSMPowered x86_64 systems */ +void vsmp_init(void); + char *machine_specific_memory_setup(void); #ifndef CONFIG_PARAVIRT #define paravirt_post_allocator_init() do {} while (0) -- cgit v1.2.1 From 96597fd2be7070631ad0776cd8bced21415fd5e3 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:04 -0200 Subject: x86: introduce vsmp paravirt helpers Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/Kconfig | 3 ++- arch/x86/kernel/vsmp_64.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6c70fed0f9a0..ed7f72b39113 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -328,7 +328,8 @@ config X86_RDC321X config X86_VSMP bool "Support for ScaleMP vSMP" 
depends on X86_64 && PCI - help + select PARAVIRT + help Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is supposed to run on these EM64T-based machines. Only choose this option if you have one of these machines. diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index fdf9fba6ba9c..b93ed66c754f 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -8,6 +8,8 @@ * * Ravikiran Thirumalai , * Shai Fultheim + * Paravirt ops integration: Glauber de Oliveira Costa , + * Ravikiran Thirumalai */ #include @@ -15,6 +17,60 @@ #include #include #include +#include + +/* + * Interrupt control on vSMPowered systems: + * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' + * and vice versa. + */ + +static unsigned long vsmp_save_fl(void) +{ + unsigned long flags = native_save_fl(); + + if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC)) + flags &= ~X86_EFLAGS_IF; + return flags; +} + +static void vsmp_restore_fl(unsigned long flags) +{ + if (flags & X86_EFLAGS_IF) + flags &= ~X86_EFLAGS_AC; + else + flags |= X86_EFLAGS_AC; + native_restore_fl(flags); +} + +static void vsmp_irq_disable(void) +{ + unsigned long flags = native_save_fl(); + + native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); +} + +static void vsmp_irq_enable(void) +{ + unsigned long flags = native_save_fl(); + + native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); +} + +static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, + unsigned long addr, unsigned len) +{ + switch (type) { + case PARAVIRT_PATCH(pv_irq_ops.irq_enable): + case PARAVIRT_PATCH(pv_irq_ops.irq_disable): + case PARAVIRT_PATCH(pv_irq_ops.save_fl): + case PARAVIRT_PATCH(pv_irq_ops.restore_fl): + return paravirt_patch_default(type, clobbers, ibuf, addr, len); + default: + return native_patch(type, clobbers, ibuf, addr, len); + } + +} void __init vsmp_init(void) { -- cgit v1.2.1 From bc7c314d7048017caa0725b41cc577cccf4fc53b Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 11 Feb 2008 17:16:05 -0200 Subject: x86, vsmp: use the paravirt helpers Signed-off-by: Glauber Costa Signed-off-by: Ravikiran Thirumalai Acked-by: Shai Fultheim Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/vsmp_64.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index b93ed66c754f..54202b1805da 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -87,6 +87,13 @@ void __init vsmp_init(void) PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) return; + /* If we are, use the distinguished irq functions */ + pv_irq_ops.irq_disable = vsmp_irq_disable; + pv_irq_ops.irq_enable = vsmp_irq_enable; + pv_irq_ops.save_fl = vsmp_save_fl; + pv_irq_ops.restore_fl = vsmp_restore_fl; + pv_init_ops.patch = vsmp_patch; + /* set vSMP magic bits to indicate vSMP capable kernel */ cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0); address = early_ioremap(cfg, 8); -- cgit v1.2.1 From 03ae5768b6110ebaa97dc3e7abf1c3d8bec5f874 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Fri, 15 Feb 2008 12:00:23 +0100 Subject: x86: use ELF section to list CPU vendor specific code Replace the hardcoded list of initialization functions for each CPU vendor by a list in an ELF section, which is read at initialization in arch/x86/kernel/cpu/cpu.c to fill the cpu_devs[] array. 
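
[ editor's note: the section-based registry described here can be modelled in plain userspace; the sketch below is an analogue, not kernel code. It relies on GNU ld synthesizing __start_<section> and __stop_<section> symbols for sections whose names are valid C identifiers, whereas the patch defines __x86cpuvendor_start and __x86cpuvendor_end explicitly in the vmlinux linker scripts (see the .lds.S hunks further down). ]

/* registry.c - userspace analogue of cpu_vendor_dev_register() */
#include <stdio.h>

struct cpu_dev {
	const char *c_vendor;
};

struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

extern struct cpu_vendor_dev __start_cpuvendor[], __stop_cpuvendor[];

#define cpu_vendor_dev_register(id, dev)			\
	static struct cpu_vendor_dev __vendor_dev_##id		\
	__attribute__((__section__("cpuvendor"), __used__)) =	\
	{ id, dev }

static struct cpu_dev intel_cpu_dev = { "Intel" };
static struct cpu_dev amd_cpu_dev = { "AMD" };

cpu_vendor_dev_register(0, &intel_cpu_dev);
cpu_vendor_dev_register(1, &amd_cpu_dev);

int main(void)
{
	struct cpu_vendor_dev *cvdev;

	/* same loop shape as early_cpu_init() in the patch */
	for (cvdev = __start_cpuvendor; cvdev < __stop_cpuvendor; cvdev++)
		printf("vendor %d -> %s\n", cvdev->vendor,
		       cvdev->cpu_dev->c_vendor);
	return 0;
}
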
The ELF section, named .x86cpuvendor.init, is reclaimed after boot, and contains entries of type "struct cpu_vendor_dev" which associates a vendor number with a pointer to a "struct cpu_dev" structure. This first modification allows to remove all the VENDOR_init_cpu() functions. This patch also removes the hardcoded calls to early_init_amd() and early_init_intel(). Instead, we add a "c_early_init" member to the cpu_dev structure, which is then called if not NULL by the generic CPU initialization code. Unfortunately, in early_cpu_detect(), this_cpu is not yet set, so we have to use the cpu_devs[] array directly. This patch is part of the Linux Tiny project, and is needed for further patch that will allow to disable compilation of unused CPU support code. Signed-off-by: Thomas Petazzoni Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/amd.c | 5 ++++- arch/x86/kernel/cpu/centaur.c | 6 +----- arch/x86/kernel/cpu/common.c | 33 ++++++++++----------------------- arch/x86/kernel/cpu/cpu.h | 26 +++++++++++++------------- arch/x86/kernel/cpu/cyrix.c | 13 ++----------- arch/x86/kernel/cpu/intel.c | 9 +++------ arch/x86/kernel/cpu/transmeta.c | 6 +----- arch/x86/kernel/cpu/umc.c | 7 ++----- arch/x86/kernel/vmlinux_32.lds.S | 5 +++++ arch/x86/kernel/vmlinux_64.lds.S | 5 +++++ 10 files changed, 46 insertions(+), 69 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 693e353999cd..cab4e562b5cb 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -63,7 +63,7 @@ static __cpuinit int amd_apic_timer_broken(void) int force_mwait __cpuinitdata; -void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) { if (cpuid_eax(0x80000000) >= 0x80000007) { c->x86_power = cpuid_edx(0x80000007); @@ -336,6 +336,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = { } }, }, + .c_early_init = early_init_amd, .c_init = init_amd, .c_size_cache = amd_size_cache, }; @@ -345,3 +346,5 @@ int __init amd_init_cpu(void) cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; return 0; } + +cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 9681fa15ddf0..194ec8311c3b 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -464,8 +464,4 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = { .c_size_cache = centaur_size_cache, }; -int __init centaur_init_cpu(void) -{ - cpu_devs[X86_VENDOR_CENTAUR] = ¢aur_cpu_dev; - return 0; -} +cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a38aafaefc23..0fd6be154d5d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -328,14 +328,9 @@ static void __init early_cpu_detect(void) get_cpu_vendor(c, 1); - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - early_init_amd(c); - break; - case X86_VENDOR_INTEL: - early_init_intel(c); - break; - } + if (c->x86_vendor != X86_VENDOR_UNKNOWN && + cpu_devs[c->x86_vendor]->c_early_init) + cpu_devs[c->x86_vendor]->c_early_init(c); early_get_cap(c); } @@ -616,23 +611,15 @@ __setup("clearcpuid=", setup_disablecpuid); cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; -/* This is hacky. :) - * We're emulating future behavior. - * In the future, the cpu-specific init functions will be called implicitly - * via the magic of initcalls. - * They will insert themselves into the cpu_devs structure. 
- * Then, when cpu_init() is called, we can just iterate over that array. - */ void __init early_cpu_init(void) { - intel_cpu_init(); - cyrix_init_cpu(); - nsc_init_cpu(); - amd_init_cpu(); - centaur_init_cpu(); - transmeta_init_cpu(); - nexgen_init_cpu(); - umc_init_cpu(); + struct cpu_vendor_dev *cvdev; + + for (cvdev = __x86cpuvendor_start ; + cvdev < __x86cpuvendor_end ; + cvdev++) + cpu_devs[cvdev->vendor] = cvdev->cpu_dev; + early_cpu_detect(); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index e0b38c33d842..783691b2a738 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -14,6 +14,7 @@ struct cpu_dev { struct cpu_model_info c_models[4]; + void (*c_early_init)(struct cpuinfo_x86 *c); void (*c_init)(struct cpuinfo_x86 * c); void (*c_identify)(struct cpuinfo_x86 * c); unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); @@ -21,18 +22,17 @@ struct cpu_dev { extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; +struct cpu_vendor_dev { + int vendor; + struct cpu_dev *cpu_dev; +}; + +#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ + static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ + __attribute__((__section__(".x86cpuvendor.init"))) = \ + { cpu_vendor_id, cpu_dev } + +extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; + extern int get_model_name(struct cpuinfo_x86 *c); extern void display_cacheinfo(struct cpuinfo_x86 *c); - -extern void early_init_intel(struct cpuinfo_x86 *c); -extern void early_init_amd(struct cpuinfo_x86 *c); - -/* Specific CPU type init functions */ -int intel_cpu_init(void); -int amd_init_cpu(void); -int cyrix_init_cpu(void); -int nsc_init_cpu(void); -int centaur_init_cpu(void); -int transmeta_init_cpu(void); -int nexgen_init_cpu(void); -int umc_init_cpu(void); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 7139b0262703..9c4ee98f2cb8 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -439,11 +439,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { .c_identify = cyrix_identify, }; -int __init cyrix_init_cpu(void) -{ - cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev; - return 0; -} +cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); static struct cpu_dev nsc_cpu_dev __cpuinitdata = { .c_vendor = "NSC", @@ -451,9 +447,4 @@ static struct cpu_dev nsc_cpu_dev __cpuinitdata = { .c_init = init_nsc, }; -int __init nsc_init_cpu(void) -{ - cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev; - return 0; -} - +cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fae31ce747bd..34468b2e2507 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -30,7 +30,7 @@ struct movsl_mask movsl_mask __read_mostly; #endif -void __cpuinit early_init_intel(struct cpuinfo_x86 *c) +static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ if (c->x86 == 15 && c->x86_cache_alignment == 64) @@ -290,15 +290,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { } }, }, + .c_early_init = early_init_intel, .c_init = init_intel, .c_size_cache = intel_size_cache, }; -__init int intel_cpu_init(void) -{ - cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; - return 0; -} +cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); #ifndef CONFIG_X86_CMPXCHG unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) diff --git 
a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index e8b422c1c512..c2d168e992f4 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -102,8 +102,4 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { .c_identify = transmeta_identify, }; -int __init transmeta_init_cpu(void) -{ - cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev; - return 0; -} +cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index a7a4e75bdcd7..b1acf08245fb 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c @@ -19,8 +19,5 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = { }, }; -int __init umc_init_cpu(void) -{ - cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev; - return 0; -} +cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); + diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 2ffa9656fe7a..ce5ed083a1e9 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -149,6 +149,11 @@ SECTIONS *(.con_initcall.init) __con_initcall_end = .; } + .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { + __x86cpuvendor_start = .; + *(.x86cpuvendor.init) + __x86cpuvendor_end = .; + } SECURITY_INIT . = ALIGN(4); .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 4c369451007b..b7ab3c335fae 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -177,6 +177,11 @@ SECTIONS *(.con_initcall.init) } __con_initcall_end = .; + __x86cpuvendor_start = .; + .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { + *(.x86cpuvendor.init) + } + __x86cpuvendor_end = .; SECURITY_INIT . = ALIGN(8); -- cgit v1.2.1 From 04aaa7ba096c707a8df337b29303f1a5a65f0462 Mon Sep 17 00:00:00 2001 From: "David P. Reed" Date: Sun, 17 Feb 2008 16:56:39 -0500 Subject: x86: fix cmos read and write to not use inb_p and outb_p fix code to access CMOS rtc registers so that it does not use inb_p and outb_p routines, which are deprecated. Extensive research on all known CMOS RTC chipset timing shows that there is no need for a delay in accessing the registers of these chips even on old machines. These chipa are never on an expansion bus, but have always been "motherboard" resources, either in the processor chipset or explicitly on the motherboard, and they are not part of the ISA/LPC or PCI buses, so delays should not be based on bus timing. The reason to fix it: 1) port 80 writes often hang some laptops that use ENE EC chipsets, esp. those designed and manufactured by Quanta for HP; 2) RTC accesses are timing sensitive, and extra microseconds may matter; 3) the new "io_delay" function is calibrated by expansion bus timing needs, thus is not appropriate for access to CMOS rtc registers. Signed-off-by: David P. 
Reed Acked-by: Alan Cox Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/rtc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 91492190ac72..9615eee9b775 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -146,8 +146,8 @@ unsigned char rtc_cmos_read(unsigned char addr) unsigned char val; lock_cmos_prefix(addr); - outb_p(addr, RTC_PORT(0)); - val = inb_p(RTC_PORT(1)); + outb(addr, RTC_PORT(0)); + val = inb(RTC_PORT(1)); lock_cmos_suffix(addr); return val; } @@ -156,8 +156,8 @@ EXPORT_SYMBOL(rtc_cmos_read); void rtc_cmos_write(unsigned char val, unsigned char addr) { lock_cmos_prefix(addr); - outb_p(addr, RTC_PORT(0)); - outb_p(val, RTC_PORT(1)); + outb(addr, RTC_PORT(0)); + outb(val, RTC_PORT(1)); lock_cmos_suffix(addr); } EXPORT_SYMBOL(rtc_cmos_write); -- cgit v1.2.1 From ca5d3f14915f5f8db75f7b0c198c0c154947fc5e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 18 Feb 2008 08:53:56 +0100 Subject: x86: clean up mmx_32.c checkpatch.pl --file cleanups: before: total: 74 errors, 3 warnings, 386 lines checked after: total: 0 errors, 0 warnings, 377 lines checked no code changed: arch/x86/lib/mmx_32.o: text data bss dec hex filename 1323 0 8 1331 533 mmx_32.o.before 1323 0 8 1331 533 mmx_32.o.after md5: 4cc39f1017dc40a5ebf02ce0ff7312bc mmx_32.o.before.asm 4cc39f1017dc40a5ebf02ce0ff7312bc mmx_32.o.after.asm Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/lib/mmx_32.c | 197 ++++++++++++++++++++++++-------------------------- 1 file changed, 94 insertions(+), 103 deletions(-) diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index cc9b4a4450f3..c9f2d9ba8dd8 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c @@ -1,32 +1,30 @@ -#include -#include -#include -#include -#include - -#include -#include - - /* * MMX 3DNow! library helper functions * * To do: - * We can use MMX just for prefetch in IRQ's. This may be a win. + * We can use MMX just for prefetch in IRQ's. This may be a win. * (reported so on K6-III) * We should use a better code neutral filler for the short jump * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ?? * We also want to clobber the filler register so we don't get any - * register forwarding stalls on the filler. + * register forwarding stalls on the filler. * * Add *user handling. Checksums are not a win with MMX on any CPU * tested so far for any MMX solution figured. 
* - * 22/09/2000 - Arjan van de Ven - * Improved for non-egineering-sample Athlons + * 22/09/2000 - Arjan van de Ven + * Improved for non-egineering-sample Athlons * */ - +#include +#include +#include +#include +#include + +#include +#include + void *_mmx_memcpy(void *to, const void *from, size_t len) { void *p; @@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); - - - for(; i>5; i--) - { + _ASM_EXTABLE(1b, 3b) + : : "r" (from)); + + for ( ; i > 5; i--) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } - for(; i>0; i--) - { + for ( ; i > 0; i--) { __asm__ __volatile__ ( " movq (%0), %%mm0\n" " movq 8(%0), %%mm1\n" @@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) " movq %%mm1, 40(%1)\n" " movq %%mm2, 48(%1)\n" " movq %%mm3, 56(%1)\n" - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } /* - * Now do the tail of the block + * Now do the tail of the block: */ - __memcpy(to, from, len&63); + __memcpy(to, from, len & 63); kernel_fpu_end(); + return p; } +EXPORT_SYMBOL(_mmx_memcpy); #ifdef CONFIG_MK7 @@ -128,13 +127,12 @@ static void fast_clear_page(void *page) int i; kernel_fpu_begin(); - + __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); - for(i=0;i<4096/64;i++) - { + for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( " movntq %%mm0, (%0)\n" " movntq %%mm0, 8(%0)\n" @@ -145,14 +143,15 @@ static void fast_clear_page(void *page) " movntq %%mm0, 48(%0)\n" " movntq %%mm0, 56(%0)\n" : : "r" (page) : "memory"); - page+=64; + page += 64; } - /* since movntq is weakly-ordered, a "sfence" is needed to become - * ordered again. + + /* + * Since movntq is weakly-ordered, a "sfence" is needed to become + * ordered again: */ - __asm__ __volatile__ ( - " sfence \n" : : - ); + __asm__ __volatile__("sfence\n"::); + kernel_fpu_end(); } @@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from) kernel_fpu_begin(); - /* maybe the prefetch stuff can go before the expensive fnsave... + /* + * maybe the prefetch stuff can go before the expensive fnsave... * but that is for later. 
-AV */ - __asm__ __volatile__ ( + __asm__ __volatile__( "1: prefetch (%0)\n" " prefetch 64(%0)\n" " prefetch 128(%0)\n" @@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); + _ASM_EXTABLE(1b, 3b) : : "r" (from)); - for(i=0; i<(4096-320)/64; i++) - { + for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } - for(i=(4096-320)/64; i<4096/64; i++) - { + + for (i = (4096-320)/64; i < 4096/64; i++) { __asm__ __volatile__ ( "2: movq (%0), %%mm0\n" " movntq %%mm0, (%1)\n" @@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from) " movntq %%mm6, 48(%1)\n" " movq 56(%0), %%mm7\n" " movntq %%mm7, 56(%1)\n" - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + : : "r" (from), "r" (to) : "memory"); + from += 64; + to += 64; } - /* since movntq is weakly-ordered, a "sfence" is needed to become - * ordered again. + /* + * Since movntq is weakly-ordered, a "sfence" is needed to become + * ordered again: */ - __asm__ __volatile__ ( - " sfence \n" : : - ); + __asm__ __volatile__("sfence \n"::); kernel_fpu_end(); } -#else +#else /* CONFIG_MK7 */ /* * Generic MMX implementation without K7 specific streaming */ - static void fast_clear_page(void *page) { int i; - + kernel_fpu_begin(); - + __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); - for(i=0;i<4096/128;i++) - { + for (i = 0; i < 4096/128; i++) { __asm__ __volatile__ ( " movq %%mm0, (%0)\n" " movq %%mm0, 8(%0)\n" @@ -275,8 +270,8 @@ static void fast_clear_page(void *page) " movq %%mm0, 104(%0)\n" " movq %%mm0, 112(%0)\n" " movq %%mm0, 120(%0)\n" - : : "r" (page) : "memory"); - page+=128; + : : "r" (page) : "memory"); + page += 128; } kernel_fpu_end(); @@ -285,8 +280,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; - - + kernel_fpu_begin(); __asm__ __volatile__ ( @@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); + _ASM_EXTABLE(1b, 3b) : : "r" (from)); - for(i=0; i<4096/64; i++) - { + for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } kernel_fpu_end(); } - -#endif +#endif /* !CONFIG_MK7 */ /* - * Favour MMX for page clear and copy. 
+ * Favour MMX for page clear and copy: */ - -static void slow_zero_page(void * page) +static void slow_zero_page(void *page) { int d0, d1; - __asm__ __volatile__( \ - "cld\n\t" \ - "rep ; stosl" \ - : "=&c" (d0), "=&D" (d1) - :"a" (0),"1" (page),"0" (1024) - :"memory"); + + __asm__ __volatile__( + "cld\n\t" + "rep ; stosl" + + : "=&c" (d0), "=&D" (d1) + :"a" (0), "1" (page), "0" (1024) + :"memory"); } - -void mmx_clear_page(void * page) + +void mmx_clear_page(void *page) { - if(unlikely(in_interrupt())) + if (unlikely(in_interrupt())) slow_zero_page(page); else fast_clear_page(page); } +EXPORT_SYMBOL(mmx_clear_page); static void slow_copy_page(void *to, void *from) { int d0, d1, d2; - __asm__ __volatile__( \ - "cld\n\t" \ - "rep ; movsl" \ - : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ - : "0" (1024),"1" ((long) to),"2" ((long) from) \ + + __asm__ __volatile__( + "cld\n\t" + "rep ; movsl" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + : "0" (1024), "1" ((long) to), "2" ((long) from) : "memory"); } - void mmx_copy_page(void *to, void *from) { - if(unlikely(in_interrupt())) + if (unlikely(in_interrupt())) slow_copy_page(to, from); else fast_copy_page(to, from); } - -EXPORT_SYMBOL(_mmx_memcpy); -EXPORT_SYMBOL(mmx_clear_page); EXPORT_SYMBOL(mmx_copy_page); -- cgit v1.2.1 From 29a9994bd8dbafc17f43d31651d31ea7b0add6a4 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Sun, 17 Feb 2008 23:30:23 +0100 Subject: x86: coding style fixes for arch/x86/kernel/cpu/centaur.c Kills more than 150 errors/warnings Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/centaur.c | 230 +++++++++++++++++++++--------------------- 1 file changed, 114 insertions(+), 116 deletions(-) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 194ec8311c3b..710fe1ed0731 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -11,21 +11,21 @@ static u32 __cpuinit power2(u32 x) { - u32 s=1; - while(s<=x) - s<<=1; - return s>>=1; + u32 s = 1; + while(s <= x) + s <<= 1; + return s >>= 1; } /* * Set up an actual MCR */ - + static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) { u32 lo, hi; - + hi = base & ~0xFFF; lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ lo &= ~0xFFF; /* Remove the ctrl value bits */ @@ -45,7 +45,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */ int i; u32 top = 0; u32 clip = 0xFFFFFFFFUL; - + for (i = 0; i < e820.nr_map; i++) { unsigned long start, end; @@ -55,10 +55,10 @@ static u32 __cpuinit ramtop(void) /* 16388 */ * Don't MCR over reserved space. Ignore the ISA hole * we frob around that catastrophe already */ - + if (e820.map[i].type == E820_RESERVED) { - if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip) + if (e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip) clip = e820.map[i].addr; continue; } @@ -71,19 +71,19 @@ static u32 __cpuinit ramtop(void) /* 16388 */ } /* Everything below 'top' should be RAM except for the ISA hole. Because of the limited MCR's we want to map NV/ACPI into our - MCR range for gunk in RAM - + MCR range for gunk in RAM + Clip might cause us to MCR insufficient RAM but that is an acceptable failure mode and should only bite obscure boxes with a VESA hole at 15Mb - + The second case Clip sometimes kicks in is when the EBDA is marked as reserved. 
Again we fail safe with reasonable results */ - - if(top>clip) - top=clip; - + + if(top > clip) + top = clip; + return top; } @@ -99,8 +99,8 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) u32 top = root; u32 floor = 0; int ct = 0; - - while(ct high && fspace > low) + + if (fspace > high && fspace > low) { centaur_mcr_insert(ct, floor, fspace, key); floor += fspace; } - else if(high > low) - { + else if (high > low) { centaur_mcr_insert(ct, top, high, key); top += high; } - else if(low > 0) - { + else if (low > 0) { base -= low; centaur_mcr_insert(ct, base, low, key); } @@ -162,7 +160,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) * We loaded ct values. We now need to set the mask. The caller * must do this bit. */ - + return ct; } @@ -173,7 +171,7 @@ static void __cpuinit centaur_create_optimal_mcr(void) * Allocate up to 6 mcrs to mark as much of ram as possible * as write combining and weak write ordered. * - * To experiment with: Linux never uses stack operations for + * To experiment with: Linux never uses stack operations for * mmio spaces so we could globally enable stack operation wc * * Load the registers with type 31 - full write combining, all @@ -184,8 +182,8 @@ static void __cpuinit centaur_create_optimal_mcr(void) /* * Wipe unused MCRs */ - - for(i=used;i<8;i++) + + for (i = used; i < 8; i++) wrmsr(MSR_IDT_MCR0+i, 0, 0); } @@ -205,21 +203,21 @@ static void __cpuinit winchip2_create_optimal_mcr(void) */ int used = centaur_mcr_compute(6, 25); - + /* * Mark the registers we are using. */ - + rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - for(i=0;i>17) & 7; lo |= key<<6; /* replace with unlock key */ wrmsr(MSR_IDT_MCR_CTRL, lo, hi); @@ -242,9 +240,9 @@ static void __cpuinit winchip2_unprotect_mcr(void) static void __cpuinit winchip2_protect_mcr(void) { u32 lo, hi; - + rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - lo&=~0x1C0; /* blank bits 8-6 */ + lo &= ~0x1C0; /* blank bits 8-6 */ wrmsr(MSR_IDT_MCR_CTRL, lo, hi); } #endif /* CONFIG_X86_OOSTORE */ @@ -267,17 +265,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) /* enable ACE unit, if present and disabled */ if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { - rdmsr (MSR_VIA_FCR, lo, hi); + rdmsr(MSR_VIA_FCR, lo, hi); lo |= ACE_FCR; /* enable ACE unit */ - wrmsr (MSR_VIA_FCR, lo, hi); + wrmsr(MSR_VIA_FCR, lo, hi); printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); } /* enable RNG unit, if present and disabled */ if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { - rdmsr (MSR_VIA_RNG, lo, hi); + rdmsr(MSR_VIA_RNG, lo, hi); lo |= RNG_ENABLE; /* enable RNG unit */ - wrmsr (MSR_VIA_RNG, lo, hi); + wrmsr(MSR_VIA_RNG, lo, hi); printk(KERN_INFO "CPU: Enabled h/w RNG\n"); } @@ -288,15 +286,15 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) } /* Cyrix III family needs CX8 & PGE explicitly enabled. */ - if (c->x86_model >=6 && c->x86_model <= 9) { - rdmsr (MSR_VIA_FCR, lo, hi); + if (c->x86_model >= 6 && c->x86_model <= 9) { + rdmsr(MSR_VIA_FCR, lo, hi); lo |= (1<<1 | 1<<7); - wrmsr (MSR_VIA_FCR, lo, hi); + wrmsr(MSR_VIA_FCR, lo, hi); set_bit(X86_FEATURE_CX8, c->x86_capability); } /* Before Nehemiah, the C3's had 3dNOW! 
*/ - if (c->x86_model >=6 && c->x86_model <9) + if (c->x86_model >= 6 && c->x86_model < 9) set_bit(X86_FEATURE_3DNOW, c->x86_capability); get_model_name(c); @@ -306,31 +304,31 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) static void __cpuinit init_centaur(struct cpuinfo_x86 *c) { enum { - ECX8=1<<1, - EIERRINT=1<<2, - DPM=1<<3, - DMCE=1<<4, - DSTPCLK=1<<5, - ELINEAR=1<<6, - DSMC=1<<7, - DTLOCK=1<<8, - EDCTLB=1<<8, - EMMX=1<<9, - DPDC=1<<11, - EBRPRED=1<<12, - DIC=1<<13, - DDC=1<<14, - DNA=1<<15, - ERETSTK=1<<16, - E2MMX=1<<19, - EAMD3D=1<<20, + ECX8 = 1<<1, + EIERRINT = 1<<2, + DPM = 1<<3, + DMCE = 1<<4, + DSTPCLK = 1<<5, + ELINEAR = 1<<6, + DSMC = 1<<7, + DTLOCK = 1<<8, + EDCTLB = 1<<8, + EMMX = 1<<9, + DPDC = 1<<11, + EBRPRED = 1<<12, + DIC = 1<<13, + DDC = 1<<14, + DNA = 1<<15, + ERETSTK = 1<<16, + E2MMX = 1<<19, + EAMD3D = 1<<20, }; char *name; - u32 fcr_set=0; - u32 fcr_clr=0; - u32 lo,hi,newlo; - u32 aa,bb,cc,dd; + u32 fcr_set = 0; + u32 fcr_clr = 0; + u32 lo, hi, newlo; + u32 aa, bb, cc, dd; /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ @@ -338,12 +336,12 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) switch (c->x86) { - case 5: - switch(c->x86_model) { + case 5: + switch (c->x86_model) { case 4: - name="C6"; - fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; - fcr_clr=DPDC; + name = "C6"; + fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; + fcr_clr = DPDC; printk(KERN_NOTICE "Disabling bugged TSC.\n"); clear_bit(X86_FEATURE_TSC, c->x86_capability); #ifdef CONFIG_X86_OOSTORE @@ -351,29 +349,29 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) /* Enable write combining on non-stack, non-string write combining on string, all types - weak write ordering - - The C6 original lacks weak read order - + weak write ordering + + The C6 original lacks weak read order + Note 0x120 is write only on Winchip 1 */ - + wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); -#endif +#endif break; case 8: - switch(c->x86_mask) { + switch (c->x86_mask) { default: - name="2"; + name = "2"; break; case 7 ... 9: - name="2A"; + name = "2A"; break; case 10 ... 
15: - name="2B"; + name = "2B"; break; } - fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr=DPDC; + fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; + fcr_clr = DPDC; #ifdef CONFIG_X86_OOSTORE winchip2_unprotect_mcr(); winchip2_create_optimal_mcr(); @@ -381,17 +379,17 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) /* Enable write combining on non-stack, non-string write combining on string, all types - weak write ordering + weak write ordering */ - lo|=31; + lo |= 31; wrmsr(MSR_IDT_MCR_CTRL, lo, hi); winchip2_protect_mcr(); #endif break; case 9: - name="3"; - fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr=DPDC; + name = "3"; + fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; + fcr_clr = DPDC; #ifdef CONFIG_X86_OOSTORE winchip2_unprotect_mcr(); winchip2_create_optimal_mcr(); @@ -399,50 +397,50 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) /* Enable write combining on non-stack, non-string write combining on string, all types - weak write ordering + weak write ordering */ - lo|=31; + lo |= 31; wrmsr(MSR_IDT_MCR_CTRL, lo, hi); winchip2_protect_mcr(); #endif break; default: - name="??"; + name = "??"; } rdmsr(MSR_IDT_FCR1, lo, hi); - newlo=(lo|fcr_set) & (~fcr_clr); + newlo = (lo|fcr_set) & (~fcr_clr); - if (newlo!=lo) { - printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo ); - wrmsr(MSR_IDT_FCR1, newlo, hi ); + if (newlo != lo) { + printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo); + wrmsr(MSR_IDT_FCR1, newlo, hi); } else { - printk(KERN_INFO "Centaur FCR is 0x%X\n",lo); + printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); } /* Emulate MTRRs using Centaur's MCR. */ set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); /* Report CX8 */ set_bit(X86_FEATURE_CX8, c->x86_capability); /* Set 3DNow! on Winchip 2 and above. */ - if (c->x86_model >=8) + if (c->x86_model >= 8) set_bit(X86_FEATURE_3DNOW, c->x86_capability); /* See if we can find out some more. */ - if ( cpuid_eax(0x80000000) >= 0x80000005 ) { + if (cpuid_eax(0x80000000) >= 0x80000005) { /* Yes, we can. */ - cpuid(0x80000005,&aa,&bb,&cc,&dd); + cpuid(0x80000005, &aa, &bb, &cc, &dd); /* Add L1 data and code cache sizes. */ c->x86_cache_size = (cc>>24)+(dd>>24); } - sprintf( c->x86_model_id, "WinChip %s", name ); + sprintf(c->x86_model_id, "WinChip %s", name); break; - case 6: + case 6: init_c3(c); break; } } -static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) +static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* VIA C3 CPUs (670-68F) need further shifting. */ if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) @@ -451,8 +449,8 @@ static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigne /* VIA also screwed up Nehemiah stepping 1, and made it return '65KB' instead of '64KB' - Note, it seems this may only be in engineering samples. 
*/ - if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65)) - size -=1; + if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_mask == 1) && (size == 65)) + size -= 1; return size; } -- cgit v1.2.1 From edc05e6de3e2fd203da21ba984b19d92e5398b62 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 18 Feb 2008 03:30:47 +0100 Subject: x86: more coding style fixes in centaur.c no code changed: arch/x86/kernel/cpu/centaur.o: text data bss dec hex filename 1031 324 0 1355 54b centaur.o.before 1031 324 0 1355 54b centaur.o.after md5: 4f306a7f980b58eb69c4bdcfcde565f1 centaur.o.before.asm 4f306a7f980b58eb69c4bdcfcde565f1 centaur.o.after.asm Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/centaur.c | 394 +++++++++++++++++++++--------------------- 1 file changed, 199 insertions(+), 195 deletions(-) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 710fe1ed0731..efe8da88da53 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -1,10 +1,12 @@ #include #include #include + #include #include #include #include + #include "cpu.h" #ifdef CONFIG_X86_OOSTORE @@ -12,16 +14,17 @@ static u32 __cpuinit power2(u32 x) { u32 s = 1; - while(s <= x) + + while (s <= x) s <<= 1; + return s >>= 1; } /* - * Set up an actual MCR + * Set up an actual MCR */ - static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) { u32 lo, hi; @@ -35,16 +38,15 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) } /* - * Figure what we can cover with MCR's + * Figure what we can cover with MCR's * - * Shortcut: We know you can't put 4Gig of RAM on a winchip + * Shortcut: We know you can't put 4Gig of RAM on a winchip */ - -static u32 __cpuinit ramtop(void) /* 16388 */ +static u32 __cpuinit ramtop(void) { - int i; - u32 top = 0; u32 clip = 0xFFFFFFFFUL; + u32 top = 0; + int i; for (i = 0; i < e820.nr_map; i++) { unsigned long start, end; @@ -52,13 +54,12 @@ static u32 __cpuinit ramtop(void) /* 16388 */ if (e820.map[i].addr > 0xFFFFFFFFUL) continue; /* - * Don't MCR over reserved space. Ignore the ISA hole - * we frob around that catastrophe already + * Don't MCR over reserved space. Ignore the ISA hole + * we frob around that catastrophe already */ - - if (e820.map[i].type == E820_RESERVED) - { - if (e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip) + if (e820.map[i].type == E820_RESERVED) { + if (e820.map[i].addr >= 0x100000UL && + e820.map[i].addr < clip) clip = e820.map[i].addr; continue; } @@ -69,28 +70,27 @@ static u32 __cpuinit ramtop(void) /* 16388 */ if (end > top) top = end; } - /* Everything below 'top' should be RAM except for the ISA hole. - Because of the limited MCR's we want to map NV/ACPI into our - MCR range for gunk in RAM - - Clip might cause us to MCR insufficient RAM but that is an - acceptable failure mode and should only bite obscure boxes with - a VESA hole at 15Mb - - The second case Clip sometimes kicks in is when the EBDA is marked - as reserved. Again we fail safe with reasonable results - */ - - if(top > clip) + /* + * Everything below 'top' should be RAM except for the ISA hole. + * Because of the limited MCR's we want to map NV/ACPI into our + * MCR range for gunk in RAM + * + * Clip might cause us to MCR insufficient RAM but that is an + * acceptable failure mode and should only bite obscure boxes with + * a VESA hole at 15Mb + * + * The second case Clip sometimes kicks in is when the EBDA is marked + * as reserved. 
Again we fail safe with reasonable results + */ + if (top > clip) top = clip; return top; } /* - * Compute a set of MCR's to give maximum coverage + * Compute a set of MCR's to give maximum coverage */ - static int __cpuinit centaur_mcr_compute(int nr, int key) { u32 mem = ramtop(); @@ -100,33 +100,31 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) u32 floor = 0; int ct = 0; - while (ct < nr) - { + while (ct < nr) { u32 fspace = 0; + u32 high; + u32 low; /* - * Find the largest block we will fill going upwards + * Find the largest block we will fill going upwards */ - - u32 high = power2(mem-top); + high = power2(mem-top); /* - * Find the largest block we will fill going downwards + * Find the largest block we will fill going downwards */ - - u32 low = base/2; + low = base/2; /* - * Don't fill below 1Mb going downwards as there - * is an ISA hole in the way. + * Don't fill below 1Mb going downwards as there + * is an ISA hole in the way. */ - if (base <= 1024*1024) low = 0; /* - * See how much space we could cover by filling below - * the ISA hole + * See how much space we could cover by filling below + * the ISA hole */ if (floor == 0) @@ -137,52 +135,48 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) /* And forget ROM space */ /* - * Now install the largest coverage we get + * Now install the largest coverage we get */ - - if (fspace > high && fspace > low) - { + if (fspace > high && fspace > low) { centaur_mcr_insert(ct, floor, fspace, key); floor += fspace; - } - else if (high > low) { + } else if (high > low) { centaur_mcr_insert(ct, top, high, key); top += high; - } - else if (low > 0) { + } else if (low > 0) { base -= low; centaur_mcr_insert(ct, base, low, key); - } - else break; + } else + break; ct++; } /* - * We loaded ct values. We now need to set the mask. The caller - * must do this bit. + * We loaded ct values. We now need to set the mask. The caller + * must do this bit. */ - return ct; } static void __cpuinit centaur_create_optimal_mcr(void) { + int used; int i; + /* - * Allocate up to 6 mcrs to mark as much of ram as possible - * as write combining and weak write ordered. + * Allocate up to 6 mcrs to mark as much of ram as possible + * as write combining and weak write ordered. * - * To experiment with: Linux never uses stack operations for - * mmio spaces so we could globally enable stack operation wc + * To experiment with: Linux never uses stack operations for + * mmio spaces so we could globally enable stack operation wc * - * Load the registers with type 31 - full write combining, all - * writes weakly ordered. + * Load the registers with type 31 - full write combining, all + * writes weakly ordered. */ - int used = centaur_mcr_compute(6, 31); + used = centaur_mcr_compute(6, 31); /* - * Wipe unused MCRs + * Wipe unused MCRs */ - for (i = used; i < 8; i++) wrmsr(MSR_IDT_MCR0+i, 0, 0); } @@ -190,31 +184,30 @@ static void __cpuinit centaur_create_optimal_mcr(void) static void __cpuinit winchip2_create_optimal_mcr(void) { u32 lo, hi; + int used; int i; /* - * Allocate up to 6 mcrs to mark as much of ram as possible - * as write combining, weak store ordered. + * Allocate up to 6 mcrs to mark as much of ram as possible + * as write combining, weak store ordered. 
* - * Load the registers with type 25 - * 8 - weak write ordering - * 16 - weak read ordering - * 1 - write combining + * Load the registers with type 25 + * 8 - weak write ordering + * 16 - weak read ordering + * 1 - write combining */ - - int used = centaur_mcr_compute(6, 25); + used = centaur_mcr_compute(6, 25); /* - * Mark the registers we are using. + * Mark the registers we are using. */ - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); for (i = 0; i < used; i++) lo |= 1<<(9+i); wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* - * Wipe unused MCRs + * Wipe unused MCRs */ for (i = used; i < 8; i++) @@ -222,9 +215,8 @@ static void __cpuinit winchip2_create_optimal_mcr(void) } /* - * Handle the MCR key on the Winchip 2. + * Handle the MCR key on the Winchip 2. */ - static void __cpuinit winchip2_unprotect_mcr(void) { u32 lo, hi; @@ -301,28 +293,29 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) display_cacheinfo(c); } +enum { + ECX8 = 1<<1, + EIERRINT = 1<<2, + DPM = 1<<3, + DMCE = 1<<4, + DSTPCLK = 1<<5, + ELINEAR = 1<<6, + DSMC = 1<<7, + DTLOCK = 1<<8, + EDCTLB = 1<<8, + EMMX = 1<<9, + DPDC = 1<<11, + EBRPRED = 1<<12, + DIC = 1<<13, + DDC = 1<<14, + DNA = 1<<15, + ERETSTK = 1<<16, + E2MMX = 1<<19, + EAMD3D = 1<<20, +}; + static void __cpuinit init_centaur(struct cpuinfo_x86 *c) { - enum { - ECX8 = 1<<1, - EIERRINT = 1<<2, - DPM = 1<<3, - DMCE = 1<<4, - DSTPCLK = 1<<5, - ELINEAR = 1<<6, - DSMC = 1<<7, - DTLOCK = 1<<8, - EDCTLB = 1<<8, - EMMX = 1<<9, - DPDC = 1<<11, - EBRPRED = 1<<12, - DIC = 1<<13, - DDC = 1<<14, - DNA = 1<<15, - ERETSTK = 1<<16, - E2MMX = 1<<19, - EAMD3D = 1<<20, - }; char *name; u32 fcr_set = 0; @@ -330,126 +323,137 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) u32 lo, hi, newlo; u32 aa, bb, cc, dd; - /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; - 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ + /* + * Bit 31 in normal CPUID used for nonstandard 3DNow ID; + * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway + */ clear_bit(0*32+31, c->x86_capability); switch (c->x86) { - case 5: - switch (c->x86_model) { - case 4: - name = "C6"; - fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; - fcr_clr = DPDC; - printk(KERN_NOTICE "Disabling bugged TSC.\n"); - clear_bit(X86_FEATURE_TSC, c->x86_capability); + switch (c->x86_model) { + case 4: + name = "C6"; + fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; + fcr_clr = DPDC; + printk(KERN_NOTICE "Disabling bugged TSC.\n"); + clear_bit(X86_FEATURE_TSC, c->x86_capability); #ifdef CONFIG_X86_OOSTORE - centaur_create_optimal_mcr(); - /* Enable - write combining on non-stack, non-string - write combining on string, all types - weak write ordering - - The C6 original lacks weak read order - - Note 0x120 is write only on Winchip 1 */ - - wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); + centaur_create_optimal_mcr(); + /* + * Enable: + * write combining on non-stack, non-string + * write combining on string, all types + * weak write ordering + * + * The C6 original lacks weak read order + * + * Note 0x120 is write only on Winchip 1 + */ + wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); #endif + break; + case 8: + switch (c->x86_mask) { + default: + name = "2"; break; - case 8: - switch (c->x86_mask) { - default: - name = "2"; - break; - case 7 ... 9: - name = "2A"; - break; - case 10 ... 15: - name = "2B"; - break; - } - fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr = DPDC; + case 7 ... 9: + name = "2A"; + break; + case 10 ... 
15: + name = "2B"; + break; + } + fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| + E2MMX|EAMD3D; + fcr_clr = DPDC; #ifdef CONFIG_X86_OOSTORE - winchip2_unprotect_mcr(); - winchip2_create_optimal_mcr(); - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - /* Enable - write combining on non-stack, non-string - write combining on string, all types - weak write ordering - */ - lo |= 31; - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); - winchip2_protect_mcr(); + winchip2_unprotect_mcr(); + winchip2_create_optimal_mcr(); + rdmsr(MSR_IDT_MCR_CTRL, lo, hi); + /* + * Enable: + * write combining on non-stack, non-string + * write combining on string, all types + * weak write ordering + */ + lo |= 31; + wrmsr(MSR_IDT_MCR_CTRL, lo, hi); + winchip2_protect_mcr(); #endif - break; - case 9: - name = "3"; - fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr = DPDC; + break; + case 9: + name = "3"; + fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| + E2MMX|EAMD3D; + fcr_clr = DPDC; #ifdef CONFIG_X86_OOSTORE - winchip2_unprotect_mcr(); - winchip2_create_optimal_mcr(); - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - /* Enable - write combining on non-stack, non-string - write combining on string, all types - weak write ordering - */ - lo |= 31; - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); - winchip2_protect_mcr(); + winchip2_unprotect_mcr(); + winchip2_create_optimal_mcr(); + rdmsr(MSR_IDT_MCR_CTRL, lo, hi); + /* + * Enable: + * write combining on non-stack, non-string + * write combining on string, all types + * weak write ordering + */ + lo |= 31; + wrmsr(MSR_IDT_MCR_CTRL, lo, hi); + winchip2_protect_mcr(); #endif - break; - default: - name = "??"; - } + break; + default: + name = "??"; + } - rdmsr(MSR_IDT_FCR1, lo, hi); - newlo = (lo|fcr_set) & (~fcr_clr); + rdmsr(MSR_IDT_FCR1, lo, hi); + newlo = (lo|fcr_set) & (~fcr_clr); - if (newlo != lo) { - printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo); - wrmsr(MSR_IDT_FCR1, newlo, hi); - } else { - printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); - } - /* Emulate MTRRs using Centaur's MCR. */ - set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); - /* Report CX8 */ - set_bit(X86_FEATURE_CX8, c->x86_capability); - /* Set 3DNow! on Winchip 2 and above. */ - if (c->x86_model >= 8) - set_bit(X86_FEATURE_3DNOW, c->x86_capability); - /* See if we can find out some more. */ - if (cpuid_eax(0x80000000) >= 0x80000005) { - /* Yes, we can. */ - cpuid(0x80000005, &aa, &bb, &cc, &dd); - /* Add L1 data and code cache sizes. */ - c->x86_cache_size = (cc>>24)+(dd>>24); - } - sprintf(c->x86_model_id, "WinChip %s", name); - break; + if (newlo != lo) { + printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", + lo, newlo); + wrmsr(MSR_IDT_FCR1, newlo, hi); + } else { + printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); + } + /* Emulate MTRRs using Centaur's MCR. */ + set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); + /* Report CX8 */ + set_bit(X86_FEATURE_CX8, c->x86_capability); + /* Set 3DNow! on Winchip 2 and above. */ + if (c->x86_model >= 8) + set_bit(X86_FEATURE_3DNOW, c->x86_capability); + /* See if we can find out some more. */ + if (cpuid_eax(0x80000000) >= 0x80000005) { + /* Yes, we can. */ + cpuid(0x80000005, &aa, &bb, &cc, &dd); + /* Add L1 data and code cache sizes. 
*/ + c->x86_cache_size = (cc>>24)+(dd>>24); + } + sprintf(c->x86_model_id, "WinChip %s", name); + break; case 6: - init_c3(c); - break; + init_c3(c); + break; } } -static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) +static unsigned int __cpuinit +centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* VIA C3 CPUs (670-68F) need further shifting. */ if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) size >>= 8; - /* VIA also screwed up Nehemiah stepping 1, and made - it return '65KB' instead of '64KB' - - Note, it seems this may only be in engineering samples. */ - if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_mask == 1) && (size == 65)) + /* + * There's also an erratum in Nehemiah stepping 1, which + * returns '65KB' instead of '64KB' + * - Note, it seems this may only be in engineering samples. + */ + if ((c->x86 == 6) && (c->x86_model == 9) && + (c->x86_mask == 1) && (size == 65)) size -= 1; return size; -- cgit v1.2.1 From c92a7a54d6579c9c01374092e7b61a6161f2ef70 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Sun, 17 Feb 2008 19:09:42 +0000 Subject: x86: reduce arch/x86/mm/ioremap.o size > Don't we have a special section for page-aligned data so it doesn't > waste most of two pages? We have .bss.page_aligned and it seems appropriate to use it. text data bss dec hex filename - 3388 8236 4 11628 2d6c ../build-32/arch/x86/mm/ioremap.o + 3388 48 4100 7536 1d70 ../build-32/arch/x86/mm/ioremap.o Signed-off-by: Ian Campbell Cc: Matt Mackall Cc: Sam Ravnborg Cc: Huang Ying Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/ioremap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 794895c6dcc9..868bbde74698 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -272,8 +272,8 @@ static int __init early_ioremap_debug_setup(char *str) early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; -static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] - __attribute__((aligned(PAGE_SIZE))); +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] + __section(.bss.page_aligned); static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) { -- cgit v1.2.1 From bc0a733facbbde6c464e3ba5e165607fe4824cca Mon Sep 17 00:00:00 2001 From: "David P. Reed" Date: Mon, 18 Feb 2008 13:58:34 -0500 Subject: x86: define outb_pic and inb_pic to stop using outb_p and inb_p The delay between io port accesses to the PIC is now defined using outb_pic and inb_pic. This fix provides the next step, using udelay(2) to define the *PIC specific* timing requirements, rather than relying on bus-oriented timing, which is not well calibrated. Again, the primary reason for fixing this is to use a proper delay strategy, and in particular to fix crashes that can result from using port 80 writes on machines that have resources on port 80, such as the ENE chips used by Quanta in laptops it designs and sells to, e.g. HP. Signed-off-by: David P.
Reed Signed-off-by: Ingo Molnar --- include/asm-x86/i8259.h | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 67c319e0efc7..e2650f21ca8d 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h @@ -1,6 +1,8 @@ #ifndef __ASM_I8259_H__ #define __ASM_I8259_H__ +#include + extern unsigned int cached_irq_mask; #define __byte(x,y) (((unsigned char *) &(y))[x]) @@ -29,7 +31,28 @@ extern void enable_8259A_irq(unsigned int irq); extern void disable_8259A_irq(unsigned int irq); extern unsigned int startup_8259A_irq(unsigned int irq); -#define inb_pic inb_p -#define outb_pic outb_p +/* the PIC may need a careful delay on some platforms, hence specific calls */ +static inline unsigned char inb_pic(unsigned int port) +{ + unsigned char value = inb(port); + + /* + * delay for some accesses to PIC on motherboard or in chipset + * must be at least one microsecond, so be safe here: + */ + udelay(2); + + return value; +} + +static inline void outb_pic(unsigned char value, unsigned int port) +{ + outb(value, port); + /* + * delay for some accesses to PIC on motherboard or in chipset + * must be at least one microsecond, so be safe here: + */ + udelay(2); +} #endif /* __ASM_I8259_H__ */ -- cgit v1.2.1 From 87253d1b4f2b5a29bdfc6275b9fb52a47d72df64 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Tue, 19 Feb 2008 11:12:30 +0000 Subject: x86: boot protocol updates Also update field names to simply payload_{offset,length} so as to not rule out uncompressed images. Signed-off-by: Ian Campbell Cc: H. Peter Anvin Cc: Jeremy Fitzhardinge Cc: virtualization@lists.linux-foundation.org Signed-off-by: Ingo Molnar --- Documentation/i386/boot.txt | 18 ++++++++++-------- arch/x86/boot/header.S | 6 ++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt index 05c24dfd7ecf..2eb16100bb3f 100644 --- a/Documentation/i386/boot.txt +++ b/Documentation/i386/boot.txt @@ -170,8 +170,8 @@ Offset Proto Name Meaning 0238/4 2.06+ cmdline_size Maximum size of the kernel command line 023C/4 2.07+ hardware_subarch Hardware subarchitecture 0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data -0248/4 2.08+ compressed_payload_offset -024C/4 2.08+ compressed_payload_length +0248/4 2.08+ payload_offset Offset of kernel payload +024C/4 2.08+ payload_length Length of kernel payload (1) For backwards compatibility, if the setup_sects field contains 0, the real value is 4. @@ -514,22 +514,24 @@ Protocol: 2.07+ A pointer to data that is specific to hardware subarch -Field name: compressed_payload_offset +Field name: payload_offset Type: read Offset/size: 0x248/4 Protocol: 2.08+ If non-zero then this field contains the offset from the end of the - real-mode code to the compressed payload. The compression format - should be determined using the standard magic number, currently only - gzip is used. + real-mode code to the payload. + + The payload may be compressed. The format of both the compressed and + uncompressed data should be determined using the standard magic + numbers. Currently only gzip compressed ELF is used. -Field name: compressed_payload_length +Field name: payload_length Type: read Offset/size: 0x24c/4 Protocol: 2.08+ - The length of the compressed payload. + The length of the payload. 
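
For illustration of the two payload fields documented above: a boot loader could locate the (possibly compressed) payload roughly as follows. This is a minimal sketch, not part of the patch; the structure and function names are hypothetical, and only the 0x248/0x24c offsets, the "offset from the end of the real-mode code" semantics, and the setup_sects note (1) come from the protocol text above.

	#include <stdint.h>

	/* Hypothetical names; field offsets per the setup-header table above. */
	struct setup_payload_fields {
		uint32_t payload_offset;	/* 0x248: from end of real-mode code */
		uint32_t payload_length;	/* 0x24c: length of the payload */
	};

	static unsigned char *find_payload(unsigned char *image,
					   unsigned int setup_sects,
					   const struct setup_payload_fields *hdr)
	{
		/* Real-mode code = boot sector + setup sectors; a
		 * setup_sects of 0 really means 4, per note (1) above. */
		unsigned char *pm_code =
			image + 512 * ((setup_sects ? setup_sects : 4) + 1);

		if (!hdr->payload_offset)
			return NULL;	/* header describes no payload */

		/* The payload format is identified by its magic bytes;
		 * gzip data, for example, begins with 0x1f 0x8b. */
		return pm_code + hdr->payload_offset;
	}
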
**** THE IMAGE CHECKSUM diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 40c91bb483e1..6d2df8d61c54 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -224,10 +224,8 @@ hardware_subarch: .long 0 # subarchitecture, added with 2.07 hardware_subarch_data: .quad 0 -compressed_payload_offset: - .long input_data -compressed_payload_length: - .long input_data_end-input_data +payload_offset: .long input_data +payload_length: .long input_data_end-input_data # End of setup header ##################################################### -- cgit v1.2.1 From e9406597884b5c2f196b124bdd6af47351562a9d Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 20:53:38 +0100 Subject: x86: coding style fixes to arch/x86/lib/memmove_64.c After the patch: total: 0 errors, 0 warnings, 21 lines checked no code changed: arch/x86/lib/memmove_64.o: text data bss dec hex filename 116 0 0 116 74 memmove_64.o.before 116 0 0 116 74 memmove_64.o.after md5: 2d6b0951cafb86a11a222cdd70f6104f memmove_64.o.before.asm 2d6b0951cafb86a11a222cdd70f6104f memmove_64.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/lib/memmove_64.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c index 80175e47b190..0a33909bf122 100644 --- a/arch/x86/lib/memmove_64.c +++ b/arch/x86/lib/memmove_64.c @@ -6,10 +6,10 @@ #include #undef memmove -void *memmove(void * dest,const void *src,size_t count) +void *memmove(void *dest, const void *src, size_t count) { - if (dest < src) { - return memcpy(dest,src,count); + if (dest < src) { + return memcpy(dest, src, count); } else { char *p = dest + count; const char *s = src + count; @@ -17,5 +17,5 @@ void *memmove(void * dest,const void *src,size_t count) *--p = *--s; } return dest; -} +} EXPORT_SYMBOL(memmove); -- cgit v1.2.1 From 325f86ec6d2ae4ab9879e745a92444f8bac20233 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 21:02:16 +0100 Subject: x86: coding style fixes to arch/x86/kernel/syscall_64.c After the patch: total: 0 errors, 1 warnings, 29 lines checked no code changed: arch/x86/kernel/syscall_64.o: text data bss dec hex filename 2304 0 0 2304 900 syscall_64.o.before 2304 0 0 2304 900 syscall_64.o.after md5: 0fdbb875cde8892296585226b92f4333 syscall_64.o.before.asm 0fdbb875cde8892296585226b92f4333 syscall_64.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/syscall_64.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 9d498c2f8eea..170d43c17487 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c @@ -1,4 +1,4 @@ -/* System call table for x86-64. */ +/* System call table for x86-64. */ #include #include @@ -7,20 +7,23 @@ #define __NO_STUBS -#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; +#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; #undef _ASM_X86_64_UNISTD_H_ #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [ nr ] = sym, +#define __SYSCALL(nr, sym) [nr] = sym, #undef _ASM_X86_64_UNISTD_H_ -typedef void (*sys_call_ptr_t)(void); +typedef void (*sys_call_ptr_t)(void); extern void sys_ni_syscall(void); const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { - /* Smells like a like a compiler bug -- it doesn't work when the & below is removed. 
*/ + /* + *Smells like a like a compiler bug -- it doesn't work + *when the & below is removed. + */ [0 ... __NR_syscall_max] = &sys_ni_syscall, #include }; -- cgit v1.2.1 From 8cf36d2bc5832da17a58c5f10adf2d92d138c992 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:09:59 +0100 Subject: x86: coding style fixes to arch/x86/lib/string_32.c The patch kills 45 errors and a few warnings. The file is now error/warning free: total: 0 errors, 0 warnings, 237 lines checked arch/x86/lib/string_32.c has no obvious style problems and is ready for submission. no code changed: arch/x86/lib/string_32.o: text data bss dec hex filename 639 0 0 639 27f string_32.o.before 639 0 0 639 27f string_32.o.after md5: 2db1c48187cf5113bb595153ee1fc73d string_32.o.before.asm 2db1c48187cf5113bb595153ee1fc73d string_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/lib/string_32.c | 60 ++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index c2c0504a3071..94972e7c094d 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c @@ -14,25 +14,25 @@ #include #ifdef __HAVE_ARCH_STRCPY -char *strcpy(char * dest,const char *src) +char *strcpy(char *dest, const char *src) { int d0, d1, d2; - asm volatile( "1:\tlodsb\n\t" + asm volatile("1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2) - :"0" (src),"1" (dest) : "memory"); + :"0" (src), "1" (dest) : "memory"); return dest; } EXPORT_SYMBOL(strcpy); #endif #ifdef __HAVE_ARCH_STRNCPY -char *strncpy(char * dest,const char *src,size_t count) +char *strncpy(char *dest, const char *src, size_t count) { int d0, d1, d2, d3; - asm volatile( "1:\tdecl %2\n\t" + asm volatile("1:\tdecl %2\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" @@ -42,17 +42,17 @@ char *strncpy(char * dest,const char *src,size_t count) "stosb\n" "2:" : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) - :"0" (src),"1" (dest),"2" (count) : "memory"); + :"0" (src), "1" (dest), "2" (count) : "memory"); return dest; } EXPORT_SYMBOL(strncpy); #endif #ifdef __HAVE_ARCH_STRCAT -char *strcat(char * dest,const char * src) +char *strcat(char *dest, const char *src) { int d0, d1, d2, d3; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "decl %1\n" "1:\tlodsb\n\t" @@ -67,10 +67,10 @@ EXPORT_SYMBOL(strcat); #endif #ifdef __HAVE_ARCH_STRNCAT -char *strncat(char * dest,const char * src,size_t count) +char *strncat(char *dest, const char *src, size_t count) { int d0, d1, d2, d3; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "decl %1\n\t" "movl %8,%3\n" @@ -83,7 +83,7 @@ char *strncat(char * dest,const char * src,size_t count) "2:\txorl %2,%2\n\t" "stosb" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) - : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) + : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu), "g" (count) : "memory"); return dest; } @@ -91,11 +91,11 @@ EXPORT_SYMBOL(strncat); #endif #ifdef __HAVE_ARCH_STRCMP -int strcmp(const char * cs,const char * ct) +int strcmp(const char *cs, const char *ct) { int d0, d1; int res; - asm volatile( "1:\tlodsb\n\t" + asm volatile("1:\tlodsb\n\t" "scasb\n\t" "jne 2f\n\t" "testb %%al,%%al\n\t" @@ -106,7 +106,7 @@ int strcmp(const char * cs,const char * ct) "orb $1,%%al\n" "3:" :"=a" (res), "=&S" (d0), "=&D" (d1) - :"1" (cs),"2" (ct) + :"1" (cs), "2" (ct) :"memory"); return res; } @@ -114,11 +114,11 @@ 
EXPORT_SYMBOL(strcmp); #endif #ifdef __HAVE_ARCH_STRNCMP -int strncmp(const char * cs,const char * ct,size_t count) +int strncmp(const char *cs, const char *ct, size_t count) { int res; int d0, d1, d2; - asm volatile( "1:\tdecl %3\n\t" + asm volatile("1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "scasb\n\t" "jne 3f\n\t" "testb %%al,%%al\n\t" "jne 1b\n" "2:\txorl %%eax,%%eax\n\t" "jmp 4f\n" "3:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "4:" :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) - :"1" (cs),"2" (ct),"3" (count) + :"1" (cs), "2" (ct), "3" (count) :"memory"); return res; } @@ -139,11 +139,11 @@ EXPORT_SYMBOL(strncmp); #endif #ifdef __HAVE_ARCH_STRCHR -char *strchr(const char * s, int c) +char *strchr(const char *s, int c) { int d0; - char * res; - asm volatile( "movb %%al,%%ah\n" + char *res; + asm volatile("movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "je 2f\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "movl $1,%1\n" "2:\tmovl %1,%0\n\t" "decl %0" :"=a" (res), "=&S" (d0) - :"1" (s),"0" (c) + :"1" (s), "0" (c) :"memory"); return res; } @@ -161,16 +161,16 @@ EXPORT_SYMBOL(strchr); #endif #ifdef __HAVE_ARCH_STRLEN -size_t strlen(const char * s) +size_t strlen(const char *s) { int d0; int res; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "notl %0\n\t" "decl %0" :"=c" (res), "=&D" (d0) - :"1" (s),"a" (0), "0" (0xffffffffu) + :"1" (s), "a" (0), "0" (0xffffffffu) :"memory"); return res; } @@ -178,19 +178,19 @@ EXPORT_SYMBOL(strlen); #endif #ifdef __HAVE_ARCH_MEMCHR -void *memchr(const void *cs,int c,size_t count) +void *memchr(const void *cs, int c, size_t count) { int d0; void *res; if (!count) return NULL; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" :"=D" (res), "=&c" (d0) - :"a" (c),"0" (cs),"1" (count) + :"a" (c), "0" (cs), "1" (count) :"memory"); return res; } @@ -198,7 +198,7 @@ EXPORT_SYMBOL(memchr); #endif #ifdef __HAVE_ARCH_MEMSCAN -void *memscan(void * addr, int c, size_t size) +void *memscan(void *addr, int c, size_t size) { if (!size) return addr; @@ -219,7 +219,7 @@ size_t strnlen(const char *s, size_t count) { int d0; int res; - asm volatile( "movl %2,%0\n\t" + asm volatile("movl %2,%0\n\t" "jmp 2f\n" "1:\tcmpb $0,(%0)\n\t" "je 3f\n\t" "movl %0,%3\n" "2:\tincl %0\n\t" "cmpl %1,%0\n\t" "jne 1b\n" "3:\tsubl %2,%0" :"=a" (res), "=&d" (d0) - :"c" (s),"1" (count) + :"c" (s), "1" (count) :"memory"); return res; } -- cgit v1.2.1 From 1577720524bab104eeb605c810963a2106cf4575 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:20:45 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/mcheck/p5.c The patch makes the file error free. Only 4 "WARNING: line over 80 characters" left. 
arch/x86/kernel/cpu/mcheck/p5.o: text data bss dec hex filename 452 0 4 456 1c8 p5.o.before 452 0 4 456 1c8 p5.o.after md5: 50c945ef150aa95bf0481cc3e1dc3315 p5.o.before.asm 50c945ef150aa95bf0481cc3e1dc3315 p5.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/p5.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index a18310aaae0c..bfa5817afdda 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -9,20 +9,20 @@ #include #include -#include +#include #include #include #include "mce.h" /* Machine check handler for Pentium class Intel */ -static void pentium_machine_check(struct pt_regs * regs, long error_code) +static void pentium_machine_check(struct pt_regs *regs, long error_code) { u32 loaddr, hi, lotype; rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); - if(lotype&(1<<5)) + if (lotype&(1<<5)) printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); add_taint(TAINT_MACHINE_CHECK); } @@ -31,13 +31,13 @@ static void pentium_machine_check(struct pt_regs * regs, long error_code) void intel_p5_mcheck_init(struct cpuinfo_x86 *c) { u32 l, h; - + /*Check for MCE support */ - if( !cpu_has(c, X86_FEATURE_MCE) ) - return; + if (!cpu_has(c, X86_FEATURE_MCE)) + return; /* Default P5 to off as its often misconnected */ - if(mce_disabled != -1) + if (mce_disabled != -1) return; machine_check_vector = pentium_machine_check; wmb(); @@ -47,7 +47,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) rdmsr(MSR_IA32_P5_MC_TYPE, l, h); printk(KERN_INFO "Intel old style machine check architecture supported.\n"); - /* Enable MCE */ + /* Enable MCE */ set_in_cr4(X86_CR4_MCE); printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); } -- cgit v1.2.1 From 8000a83f43f2d943eebeee9e070beb45949858ec Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:34:02 +0100 Subject: x86: coding style fixes to arch/x86/kernel/x8664_ksyms_64.c arch/x86/kernel/x8664_ksyms_64.o: text data bss dec hex filename 0 0 0 0 0 x8664_ksyms_64.o.before 0 0 0 0 0 x8664_ksyms_64.o.after md5: 2dd2d82a2b440a3c29b9ac9ce3221994 x8664_ksyms_64.o.before.asm 2dd2d82a2b440a3c29b9ac9ce3221994 x8664_ksyms_64.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/x8664_ksyms_64.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index a66e9c1a0537..e63d96823a16 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -35,15 +35,17 @@ EXPORT_SYMBOL(__copy_from_user_inatomic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); -/* Export string functions. We normally rely on gcc builtin for most of these, - but gcc sometimes decides not to inline them. */ +/* + * Export string functions. We normally rely on gcc builtin for most of these, + * but gcc sometimes decides not to inline them. 
+ */ #undef memcpy #undef memset #undef memmove -extern void * memset(void *,int,__kernel_size_t); -extern void * memcpy(void *,const void *,__kernel_size_t); -extern void * __memcpy(void *,const void *,__kernel_size_t); +extern void *memset(void *, int, __kernel_size_t); +extern void *memcpy(void *, const void *, __kernel_size_t); +extern void *__memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); -- cgit v1.2.1 From 8b45b72b04061b9d39208b0843007d62b463c211 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:43:25 +0100 Subject: x86: coding style fixes to arch/x86/oprofile/op_model_ppro.c no code changed: arch/x86/oprofile/op_model_ppro.o: text data bss dec hex filename 1765 0 16 1781 6f5 op_model_ppro.o.before 1765 0 16 1781 6f5 op_model_ppro.o.after md5: 71c7f68d2197d686e3d1121bc18a6b6e op_model_ppro.o.before.asm 71c7f68d2197d686e3d1121bc18a6b6e op_model_ppro.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/oprofile/op_model_ppro.c | 52 +++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index c554f52cb808..eff431f6c57b 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -1,4 +1,4 @@ -/** +/* * @file op_model_ppro.h * pentium pro / P6 model-specific MSR operations * @@ -15,45 +15,45 @@ #include #include #include - + #include "op_x86_model.h" #include "op_counter.h" #define NUM_COUNTERS 2 #define NUM_CONTROLS 2 -#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) -#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) -#define CTR_32BIT_WRITE(l,msrs,c) \ - do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0);} while (0) +#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) +#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) +#define CTR_32BIT_WRITE(l, msrs, c) \ + do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) -#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) +#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 
1 : 0) +#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) +#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR(x) (x &= (1<<21)) #define CTRL_SET_ENABLE(val) (val |= 1<<20) -#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) -#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) +#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) +#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) #define CTRL_SET_UM(val, m) (val |= (m << 8)) #define CTRL_SET_EVENT(val, e) (val |= e) static unsigned long reset_value[NUM_COUNTERS]; - + static void ppro_fill_in_addresses(struct op_msrs * const msrs) { int i; - for (i=0; i < NUM_COUNTERS; i++) { + for (i = 0; i < NUM_COUNTERS; i++) { if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; else msrs->counters[i].addr = 0; } - - for (i=0; i < NUM_CONTROLS; i++) { + + for (i = 0; i < NUM_CONTROLS; i++) { if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; else @@ -69,23 +69,23 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { - if (unlikely(!CTRL_IS_RESERVED(msrs,i))) + if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); CTRL_CLEAR(low); CTRL_WRITE(low, high, msrs, i); } - + /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < NUM_COUNTERS; ++i) { - if (unlikely(!CTR_IS_RESERVED(msrs,i))) + if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_32BIT_WRITE(1, msrs, i); } /* enable active counters */ for (i = 0; i < NUM_COUNTERS; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { + if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; CTR_32BIT_WRITE(counter_config[i].count, msrs, i); @@ -104,13 +104,13 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) } } - + static int ppro_check_ctrs(struct pt_regs * const regs, struct op_msrs const * const msrs) { unsigned int low, high; int i; - + for (i = 0 ; i < NUM_COUNTERS; ++i) { if (!reset_value[i]) continue; @@ -135,10 +135,10 @@ static int ppro_check_ctrs(struct pt_regs * const regs, return 1; } - + static void ppro_start(struct op_msrs const * const msrs) { - unsigned int low,high; + unsigned int low, high; int i; for (i = 0; i < NUM_COUNTERS; ++i) { @@ -153,7 +153,7 @@ static void ppro_start(struct op_msrs const * const msrs) static void ppro_stop(struct op_msrs const * const msrs) { - unsigned int low,high; + unsigned int low, high; int i; for (i = 0; i < NUM_COUNTERS; ++i) { @@ -170,11 +170,11 @@ static void ppro_shutdown(struct op_msrs const * const msrs) int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (CTR_IS_RESERVED(msrs,i)) + if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_P6_PERFCTR0 + i); } for (i = 0 ; i < NUM_CONTROLS ; ++i) { - if (CTRL_IS_RESERVED(msrs,i)) + if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); } } -- cgit v1.2.1 From d4413732b54a17684e48fbb4b52fade5bf965b5d Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:51:27 +0100 Subject: x86: coding style fixes to arch/x86/oprofile/op_model_athlon.c The patch fixes 33 errors and a few warnings reported by checkpatch.pl arch/x86/oprofile/op_model_athlon.o: text data bss dec hex filename 1691 0 32 1723 
6bb op_model_athlon.o.before 1691 0 32 1723 6bb op_model_athlon.o.after md5: c354bc2d7140e1e626c03390eddaa0a6 op_model_athlon.o.before.asm c354bc2d7140e1e626c03390eddaa0a6 op_model_athlon.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/oprofile/op_model_athlon.c | 46 ++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c index c3ee43333f26..3d534879a9dc 100644 --- a/arch/x86/oprofile/op_model_athlon.c +++ b/arch/x86/oprofile/op_model_athlon.c @@ -1,4 +1,4 @@ -/** +/* * @file op_model_athlon.h * athlon / K7 / K8 / Family 10h model-specific MSR operations * @@ -14,28 +14,28 @@ #include #include #include - + #include "op_x86_model.h" #include "op_counter.h" #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 -#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) -#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) -#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) +#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) +#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) +#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) -#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) +#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) +#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) +#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR_LO(x) (x &= (1<<21)) #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) #define CTRL_SET_ENABLE(val) (val |= 1<<20) -#define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) -#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) +#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) +#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) #define CTRL_SET_UM(val, m) (val |= (m << 8)) #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) @@ -43,19 +43,19 @@ #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; - + static void athlon_fill_in_addresses(struct op_msrs * const msrs) { int i; - for (i=0; i < NUM_COUNTERS; i++) { + for (i = 0; i < NUM_COUNTERS; i++) { if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; else msrs->counters[i].addr = 0; } - for (i=0; i < NUM_CONTROLS; i++) { + for (i = 0; i < NUM_CONTROLS; i++) { if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; else @@ -63,15 +63,15 @@ static void athlon_fill_in_addresses(struct op_msrs * const msrs) } } - + static void athlon_setup_ctrs(struct op_msrs const * const msrs) { unsigned int low, high; int i; - + /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { - if (unlikely(!CTRL_IS_RESERVED(msrs,i))) + if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); CTRL_CLEAR_LO(low); @@ -81,14 +81,14 @@ static 
void athlon_setup_ctrs(struct op_msrs const * const msrs) /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < NUM_COUNTERS; ++i) { - if (unlikely(!CTR_IS_RESERVED(msrs,i))) + if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_WRITE(1, msrs, i); } /* enable active counters */ for (i = 0; i < NUM_COUNTERS; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { + if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; CTR_WRITE(counter_config[i].count, msrs, i); @@ -112,7 +112,7 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) } } - + static int athlon_check_ctrs(struct pt_regs * const regs, struct op_msrs const * const msrs) { @@ -133,7 +133,7 @@ static int athlon_check_ctrs(struct pt_regs * const regs, return 1; } - + static void athlon_start(struct op_msrs const * const msrs) { unsigned int low, high; @@ -150,7 +150,7 @@ static void athlon_start(struct op_msrs const * const msrs) static void athlon_stop(struct op_msrs const * const msrs) { - unsigned int low,high; + unsigned int low, high; int i; /* Subtle: stop on all counters to avoid race with @@ -169,11 +169,11 @@ static void athlon_shutdown(struct op_msrs const * const msrs) int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (CTR_IS_RESERVED(msrs,i)) + if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } for (i = 0 ; i < NUM_CONTROLS ; ++i) { - if (CTRL_IS_RESERVED(msrs,i)) + if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } } -- cgit v1.2.1 From 9b0c5028825df9d99a430c9963c45e02a2b185c6 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Tue, 19 Feb 2008 23:57:17 +0100 Subject: x86: coding style fixes to arch/x86/mach-generic/probe.c The patch kills 20 errors and a few warnings. arch/x86/mach-generic/probe.o: text data bss dec hex filename 578 45 0 623 26f probe.o.before 578 45 0 623 26f probe.o.after md5: 86eb1c3e3cf37f81d37bcd9a0c9f14ad probe.o.before.asm 86eb1c3e3cf37f81d37bcd9a0c9f14ad probe.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/probe.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index f410d3cb5659..c5ae751b994a 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -1,8 +1,9 @@ -/* Copyright 2003 Andi Kleen, SuSE Labs. - * Subject to the GNU Public License, v.2 - * +/* + * Copyright 2003 Andi Kleen, SuSE Labs. + * Subject to the GNU Public License, v.2 + * * Generic x86 APIC driver probe layer. 
- */ + */ #include #include #include @@ -24,7 +25,7 @@ struct genapic *genapic = &apic_default; static struct genapic *apic_probe[] __initdata = { &apic_summit, - &apic_bigsmp, + &apic_bigsmp, &apic_es7000, &apic_default, /* must be last */ NULL, @@ -69,7 +70,7 @@ void __init generic_bigsmp_probe(void) } void __init generic_apic_probe(void) -{ +{ if (!cmdline_apic) { int i; for (i = 0; apic_probe[i]; i++) { @@ -83,40 +84,40 @@ void __init generic_apic_probe(void) panic("Didn't find an APIC driver"); } printk(KERN_INFO "Using APIC driver %s\n", genapic->name); -} +} /* These functions can switch the APIC even after the initial ->probe() */ int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) -{ +{ int i; - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { + for (i = 0; apic_probe[i]; ++i) { + if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { if (!cmdline_apic) { genapic = apic_probe[i]; printk(KERN_INFO "Switched to APIC driver `%s'.\n", genapic->name); } return 1; - } - } + } + } return 0; -} +} int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) { int i; - for (i = 0; apic_probe[i]; ++i) { - if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { + for (i = 0; apic_probe[i]; ++i) { + if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { if (!cmdline_apic) { genapic = apic_probe[i]; printk(KERN_INFO "Switched to APIC driver `%s'.\n", genapic->name); } return 1; - } - } - return 0; + } + } + return 0; } int hard_smp_processor_id(void) -- cgit v1.2.1 From 651bbe16d05ea51fc102e1bf5765687ab32485c6 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Wed, 20 Feb 2008 00:02:58 +0100 Subject: x86: coding style fixes to arch/x86/mach-generic/default.c Just whitespace cleanups. total: 0 errors, 0 warnings, 26 lines checked arch/x86/mach-generic/default.c has no obvious style problems and is ready for submission. arch/x86/mach-generic/default.o: text data bss dec hex filename 1036 140 0 1176 498 default.o.before 1036 140 0 1176 498 default.o.after md5: f283c53022c568f3162bbcde44f65762 default.o.before.asm f283c53022c568f3162bbcde44f65762 default.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/default.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 1af0cc7648f0..9e835a11a13a 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c @@ -1,4 +1,4 @@ -/* +/* * Default generic APIC driver. This handles up to 8 CPUs. */ #define APIC_DEFINITION 1 @@ -19,8 +19,8 @@ /* should be called last. */ static int probe_default(void) -{ +{ return 1; -} +} -struct genapic apic_default = APIC_INIT("default", probe_default); +struct genapic apic_default = APIC_INIT("default", probe_default); -- cgit v1.2.1 From 8108576a78a68851de194d6ed2aa0a951de7bb7f Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Wed, 20 Feb 2008 00:17:34 +0100 Subject: x86: coding style fixes to arch/x86/mach-generic/summit.c File is now error/warning free. 
arch/x86/mach-generic/summit.o: text data bss dec hex filename 1481 140 0 1621 655 summit.o.before 1481 140 0 1621 655 summit.o.after md5: 7b7dc1cbd381af7b9393da989da5b0fd summit.o.before.asm 7b7dc1cbd381af7b9393da989da5b0fd summit.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/summit.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 74883ccb8f73..a97ea0f35b1e 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -1,4 +1,4 @@ -/* +/* * APIC driver for the IBM "Summit" chipset. */ #define APIC_DEFINITION 1 @@ -19,9 +19,9 @@ #include static int probe_summit(void) -{ +{ /* probed later in mptable/ACPI hooks */ return 0; -} +} -struct genapic apic_summit = APIC_INIT("summit", probe_summit); +struct genapic apic_summit = APIC_INIT("summit", probe_summit); -- cgit v1.2.1 From c99aa3804eecbeadabcf658a535e8a00d0f2b6e6 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Wed, 20 Feb 2008 00:18:05 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/nexgen.c arch/x86/kernel/cpu/nexgen.o: text data bss dec hex filename 111 316 0 427 1ab nexgen.o.before 111 316 0 427 1ab nexgen.o.after md5: e796efefea9ebc6644338bad226599ee nexgen.o.before.asm e796efefea9ebc6644338bad226599ee nexgen.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/nexgen.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c index 961fbe1a748f..5d5e1c134123 100644 --- a/arch/x86/kernel/cpu/nexgen.c +++ b/arch/x86/kernel/cpu/nexgen.c @@ -9,11 +9,11 @@ * Detect a NexGen CPU running without BIOS hypercode new enough * to have CPUID. (Thanks to Herbert Oppmann) */ - + static int __cpuinit deep_magic_nexgen_probe(void) { int ret; - + __asm__ __volatile__ ( " movw $0x5555, %%ax\n" " xorw %%dx,%%dx\n" @@ -22,22 +22,21 @@ static int __cpuinit deep_magic_nexgen_probe(void) " movl $0, %%eax\n" " jnz 1f\n" " movl $1, %%eax\n" - "1:\n" - : "=a" (ret) : : "cx", "dx" ); + "1:\n" + : "=a" (ret) : : "cx", "dx"); return ret; } -static void __cpuinit init_nexgen(struct cpuinfo_x86 * c) +static void __cpuinit init_nexgen(struct cpuinfo_x86 *c) { c->x86_cache_size = 256; /* A few had 1 MB... 
*/ } -static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c) +static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c) { /* Detect NexGen with old hypercode */ - if ( deep_magic_nexgen_probe() ) { + if (deep_magic_nexgen_probe()) strcpy(c->x86_vendor_id, "NexGenDriven"); - } } static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { -- cgit v1.2.1 From 637cba02618c1373eb9d2b70a53c17832e27d090 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Wed, 20 Feb 2008 00:18:52 +0100 Subject: x86: coding style fixes to arch/x86/mach-generic/bigsmp.c arch/x86/mach-generic/bigsmp.o: text data bss dec hex filename 1271 144 4 1419 58b bigsmp.o.before 1271 144 4 1419 58b bigsmp.o.after md5: b83aad375294c002c79b03c3a2e4be35 bigsmp.o.before.asm b83aad375294c002c79b03c3a2e4be35 bigsmp.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/mach-generic/bigsmp.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 292a225edabe..95fc463056d0 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -1,4 +1,4 @@ -/* +/* * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. * Drives the local APIC in "clustered mode". */ @@ -32,26 +32,26 @@ static int hp_ht_bigsmp(const struct dmi_system_id *d) static const struct dmi_system_id bigsmp_dmi_table[] = { - { hp_ht_bigsmp, "HP ProLiant DL760 G2", { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P44-"), - }}, - - { hp_ht_bigsmp, "HP ProLiant DL740", { - DMI_MATCH(DMI_BIOS_VENDOR, "HP"), - DMI_MATCH(DMI_BIOS_VERSION, "P47-"), - }}, + { hp_ht_bigsmp, "HP ProLiant DL760 G2", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} + }, + + { hp_ht_bigsmp, "HP ProLiant DL740", + { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), + DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} + }, { } }; static int probe_bigsmp(void) -{ +{ if (def_to_bigsmp) - dmi_bigsmp = 1; + dmi_bigsmp = 1; else dmi_check_system(bigsmp_dmi_table); - return dmi_bigsmp; -} + return dmi_bigsmp; +} -struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); +struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); -- cgit v1.2.1 From 8fa6878ffc6366f490e99a1ab31127fb599657c9 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 20 Feb 2008 10:41:51 -0800 Subject: x86: split cpuinfo from setup_64.c into cpu/proc_64.c x86 /proc/cpuinfo code can be unified. This is the first step of unification. 
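The code being moved follows the standard seq_file iterator contract: c_start() maps a file offset to a CPU record, c_next() advances the offset, c_stop() has nothing to release, and show_cpuinfo() formats one record, all wired together through the cpuinfo_op seq_operations table. A minimal sketch of that contract, iterating a hypothetical static array instead of cpu_data()/cpu_online_map (all demo_* names are invented for illustration, not kernel symbols):

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Hypothetical backing data; stands in for cpu_data()/cpu_online_map. */
static const char *demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* Return an iterator cookie for *pos, or NULL when past the end. */
	if (*pos < (loff_t)ARRAY_SIZE(demo_items))
		return &demo_items[*pos];
	return NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* No locks or allocations to drop in this sketch. */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "item\t\t: %s\n", *(const char **)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

The seq_file core drives the table itself, calling start(), then show()/next() until NULL is returned, so moving cpuinfo_op between files changes nothing about how /proc/cpuinfo is produced.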
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/Makefile | 1 + arch/x86/kernel/cpu/proc_64.c | 126 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/setup_64.c | 120 ---------------------------------------- 3 files changed, 127 insertions(+), 120 deletions(-) create mode 100644 arch/x86/kernel/cpu/proc_64.c diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index a0c4d7c5dbd7..8ba7d281fbc2 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_X86_32) += transmeta.o obj-$(CONFIG_X86_32) += intel.o obj-$(CONFIG_X86_32) += nexgen.o obj-$(CONFIG_X86_32) += umc.o +obj-$(CONFIG_X86_64) += proc_64.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ diff --git a/arch/x86/kernel/cpu/proc_64.c b/arch/x86/kernel/cpu/proc_64.c new file mode 100644 index 000000000000..bf4a94b4b0f0 --- /dev/null +++ b/arch/x86/kernel/cpu/proc_64.c @@ -0,0 +1,126 @@ +#include +#include +#include +#include +#include +#include + +/* + * Get CPU information for use by the procfs. + */ + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + struct cpuinfo_x86 *c = v; + int cpu = 0, i; + +#ifdef CONFIG_SMP + cpu = c->cpu_index; +#endif + + seq_printf(m, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %d\n" + "model name\t: %s\n", + (unsigned)cpu, + c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", + c->x86, + (int)c->x86_model, + c->x86_model_id[0] ? c->x86_model_id : "unknown"); + + if (c->x86_mask || c->cpuid_level >= 0) + seq_printf(m, "stepping\t: %d\n", c->x86_mask); + else + seq_printf(m, "stepping\t: unknown\n"); + + if (cpu_has(c, X86_FEATURE_TSC)) { + unsigned int freq = cpufreq_quick_get((unsigned)cpu); + + if (!freq) + freq = cpu_khz; + seq_printf(m, "cpu MHz\t\t: %u.%03u\n", + freq / 1000, (freq % 1000)); + } + + /* Cache size */ + if (c->x86_cache_size >= 0) + seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + +#ifdef CONFIG_SMP + if (smp_num_siblings * c->x86_max_cores > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +#endif + + seq_printf(m, + "fpu\t\t: yes\n" + "fpu_exception\t: yes\n" + "cpuid level\t: %d\n" + "wp\t\t: yes\n" + "flags\t\t:", + c->cpuid_level); + + for (i = 0; i < 32*NCAPINTS; i++) + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) + seq_printf(m, " %s", x86_cap_flags[i]); + + seq_printf(m, "\nbogomips\t: %lu.%02lu\n", + c->loops_per_jiffy/(500000/HZ), + (c->loops_per_jiffy/(5000/HZ)) % 100); + + if (c->x86_tlbsize > 0) + seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); + seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size); + seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); + + seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", + c->x86_phys_bits, c->x86_virt_bits); + + seq_printf(m, "power management:"); + for (i = 0; i < 32; i++) { + if (c->x86_power & (1 << i)) { + if (i < ARRAY_SIZE(x86_power_flags) && + x86_power_flags[i]) + seq_printf(m, "%s%s", + x86_power_flags[i][0]?" 
":"", + x86_power_flags[i]); + else + seq_printf(m, " [%d]", i); + } + } + + seq_printf(m, "\n\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + if (*pos == 0) /* just in case, cpu 0 is not the first */ + *pos = first_cpu(cpu_online_map); + if ((*pos) < NR_CPUS && cpu_online(*pos)) + return &cpu_data(*pos); + return NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + *pos = next_cpu(*pos, cpu_online_map); + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index e67925674eae..187f084b9491 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -1068,123 +1068,3 @@ static __init int setup_disablecpuid(char *arg) return 1; } __setup("clearcpuid=", setup_disablecpuid); - -/* - * Get CPU information for use by the procfs. - */ - -static int show_cpuinfo(struct seq_file *m, void *v) -{ - struct cpuinfo_x86 *c = v; - int cpu = 0, i; - -#ifdef CONFIG_SMP - cpu = c->cpu_index; -#endif - - seq_printf(m, "processor\t: %u\n" - "vendor_id\t: %s\n" - "cpu family\t: %d\n" - "model\t\t: %d\n" - "model name\t: %s\n", - (unsigned)cpu, - c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", - c->x86, - (int)c->x86_model, - c->x86_model_id[0] ? c->x86_model_id : "unknown"); - - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); - else - seq_printf(m, "stepping\t: unknown\n"); - - if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get((unsigned)cpu); - - if (!freq) - freq = cpu_khz; - seq_printf(m, "cpu MHz\t\t: %u.%03u\n", - freq / 1000, (freq % 1000)); - } - - /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); - -#ifdef CONFIG_SMP - if (smp_num_siblings * c->x86_max_cores > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -#endif - - seq_printf(m, - "fpu\t\t: yes\n" - "fpu_exception\t: yes\n" - "cpuid level\t: %d\n" - "wp\t\t: yes\n" - "flags\t\t:", - c->cpuid_level); - - for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); - - seq_printf(m, "\nbogomips\t: %lu.%02lu\n", - c->loops_per_jiffy/(500000/HZ), - (c->loops_per_jiffy/(5000/HZ)) % 100); - - if (c->x86_tlbsize > 0) - seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); - seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size); - seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); - - seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", - c->x86_phys_bits, c->x86_virt_bits); - - seq_printf(m, "power management:"); - for (i = 0; i < 32; i++) { - if (c->x86_power & (1 << i)) { - if (i < ARRAY_SIZE(x86_power_flags) && - x86_power_flags[i]) - seq_printf(m, "%s%s", - x86_power_flags[i][0]?" 
":"", - x86_power_flags[i]); - else - seq_printf(m, " [%d]", i); - } - } - - seq_printf(m, "\n\n"); - - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - if (*pos == 0) /* just in case, cpu 0 is not the first */ - *pos = first_cpu(cpu_online_map); - if ((*pos) < NR_CPUS && cpu_online(*pos)) - return &cpu_data(*pos); - return NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - *pos = next_cpu(*pos, cpu_online_map); - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -const struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; -- cgit v1.2.1 From a967ceac01cd3847011e2a777b8365b30afa770a Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 20 Feb 2008 10:45:29 -0800 Subject: x86: make cpu/proc|_64.c similar clean up for unification. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 120 ++++++++++++++++++++++++------------------ arch/x86/kernel/cpu/proc_64.c | 63 ++++++++++++---------- 2 files changed, 105 insertions(+), 78 deletions(-) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index af11d31dce0a..9bc3b04421cd 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -8,75 +8,90 @@ /* * Get CPU information for use by the procfs. */ +static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) +{ +#ifdef CONFIG_X86_HT + if (c->x86_max_cores * smp_num_siblings > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +#endif +} + +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + /* + * We use exception 16 if we have hardware math and we've either seen + * it or the CPU claims it is internal + */ + int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); + seq_printf(m, + "fdiv_bug\t: %s\n" + "hlt_bug\t\t: %s\n" + "f00f_bug\t: %s\n" + "coma_bug\t: %s\n" + "fpu\t\t: %s\n" + "fpu_exception\t: %s\n" + "cpuid level\t: %d\n" + "wp\t\t: %s\n", + c->fdiv_bug ? "yes" : "no", + c->hlt_works_ok ? "no" : "yes", + c->f00f_bug ? "yes" : "no", + c->coma_bug ? "yes" : "no", + c->hard_math ? "yes" : "no", + fpu_exception ? "yes" : "no", + c->cpuid_level, + c->wp_works_ok ? "yes" : "no"); +} + static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - int i, n = 0; - int fpu_exception; + unsigned int cpu = 0; + int i; #ifdef CONFIG_SMP - n = c->cpu_index; + cpu = c->cpu_index; #endif - seq_printf(m, "processor\t: %d\n" - "vendor_id\t: %s\n" - "cpu family\t: %d\n" - "model\t\t: %d\n" - "model name\t: %s\n", - n, - c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", - c->x86, - c->x86_model, - c->x86_model_id[0] ? c->x86_model_id : "unknown"); + seq_printf(m, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s\n", + cpu, + c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", + c->x86, + c->x86_model, + c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); if (c->x86_mask || c->cpuid_level >= 0) seq_printf(m, "stepping\t: %d\n", c->x86_mask); else seq_printf(m, "stepping\t: unknown\n"); - if ( cpu_has(c, X86_FEATURE_TSC) ) { - unsigned int freq = cpufreq_quick_get(n); + if (cpu_has(c, X86_FEATURE_TSC)) { + unsigned int freq = cpufreq_quick_get(cpu); + if (!freq) freq = cpu_khz; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", - freq / 1000, (freq % 1000)); + freq / 1000, (freq % 1000)); } /* Cache size */ if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); -#ifdef CONFIG_X86_HT - if (c->x86_max_cores * smp_num_siblings > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, n))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -#endif - - /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ - fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); - seq_printf(m, "fdiv_bug\t: %s\n" - "hlt_bug\t\t: %s\n" - "f00f_bug\t: %s\n" - "coma_bug\t: %s\n" - "fpu\t\t: %s\n" - "fpu_exception\t: %s\n" - "cpuid level\t: %d\n" - "wp\t\t: %s\n" - "flags\t\t:", - c->fdiv_bug ? "yes" : "no", - c->hlt_works_ok ? "no" : "yes", - c->f00f_bug ? "yes" : "no", - c->coma_bug ? "yes" : "no", - c->hard_math ? "yes" : "no", - fpu_exception ? "yes" : "no", - c->cpuid_level, - c->wp_works_ok ? "yes" : "no"); - - for ( i = 0 ; i < 32*NCAPINTS ; i++ ) - if ( test_bit(i, c->x86_capability) && - x86_cap_flags[i] != NULL ) + + show_cpuinfo_core(m, c, cpu); + show_cpuinfo_misc(m, c); + + seq_printf(m, "flags\t\t:"); + for (i = 0; i < 32*NCAPINTS; i++) + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) seq_printf(m, " %s", x86_cap_flags[i]); for (i = 0; i < 32; i++) @@ -91,8 +106,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) } seq_printf(m, "\nbogomips\t: %lu.%02lu\n", - c->loops_per_jiffy/(500000/HZ), - (c->loops_per_jiffy/(5000/HZ)) % 100); + c->loops_per_jiffy/(500000/HZ), + (c->loops_per_jiffy/(5000/HZ)) % 100); seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size); return 0; @@ -106,14 +121,17 @@ static void *c_start(struct seq_file *m, loff_t *pos) return &cpu_data(*pos); return NULL; } + static void *c_next(struct seq_file *m, void *v, loff_t *pos) { *pos = next_cpu(*pos, cpu_online_map); return c_start(m, pos); } + static void c_stop(struct seq_file *m, void *v) { } + const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, diff --git a/arch/x86/kernel/cpu/proc_64.c b/arch/x86/kernel/cpu/proc_64.c index bf4a94b4b0f0..ce1b08f96820 100644 --- a/arch/x86/kernel/cpu/proc_64.c +++ b/arch/x86/kernel/cpu/proc_64.c @@ -8,25 +8,48 @@ /* * Get CPU information for use by the procfs. 
*/ +static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + if (c->x86_max_cores * smp_num_siblings > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +#endif +} + +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + seq_printf(m, + "fpu\t\t: yes\n" + "fpu_exception\t: yes\n" + "cpuid level\t: %d\n" + "wp\t\t: yes\n", + c->cpuid_level); +} static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - int cpu = 0, i; + unsigned int cpu = 0; + int i; #ifdef CONFIG_SMP cpu = c->cpu_index; #endif - seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" - "model\t\t: %d\n" + "model\t\t: %u\n" "model name\t: %s\n", - (unsigned)cpu, + cpu, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86, - (int)c->x86_model, + c->x86_model, c->x86_model_id[0] ? c->x86_model_id : "unknown"); if (c->x86_mask || c->cpuid_level >= 0) @@ -35,7 +58,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "stepping\t: unknown\n"); if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get((unsigned)cpu); + unsigned int freq = cpufreq_quick_get(cpu); if (!freq) freq = cpu_khz; @@ -47,24 +70,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); -#ifdef CONFIG_SMP - if (smp_num_siblings * c->x86_max_cores > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -#endif - - seq_printf(m, - "fpu\t\t: yes\n" - "fpu_exception\t: yes\n" - "cpuid level\t: %d\n" - "wp\t\t: yes\n" - "flags\t\t:", - c->cpuid_level); + show_cpuinfo_core(m, c, cpu); + show_cpuinfo_misc(m, c); + seq_printf(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) if (cpu_has(c, i) && x86_cap_flags[i] != NULL) seq_printf(m, " %s", x86_cap_flags[i]); @@ -119,8 +128,8 @@ static void c_stop(struct seq_file *m, void *v) } const struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, }; -- cgit v1.2.1 From f84c3a429f83a98bb0b0fd7eed7ad1edc512b91c Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 20 Feb 2008 10:47:12 -0800 Subject: x86: add power management line in /proc/cpuinfo Change /proc/cpuinfo on 32-bit, it will look like on 64-bit. 'power management' line is added and power management information will be printed at the line. 
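The new line decodes the c->x86_power bitmask: every set bit is printed by name when x86_power_flags[] has an entry for it, and as a bare "[bit]" index otherwise, matching the existing 64-bit output. A user-space sketch of the same decode (the mask value and the shortened names table are illustrative assumptions, not the kernel's actual x86_power_flags contents):

#include <stdio.h>

/* Illustrative subset only; not the kernel's real x86_power_flags table. */
static const char *power_flags[] = { "ts", "fid", "vid", "ttp", "tm", "stc" };
#define NR_NAMED (sizeof(power_flags) / sizeof(power_flags[0]))

int main(void)
{
	unsigned int x86_power = 0x205;	/* example mask: bits 0, 2 and 9 set */
	int i;

	printf("power management:");
	for (i = 0; i < 32; i++) {
		if (x86_power & (1u << i)) {
			if (i < (int)NR_NAMED && power_flags[i])
				printf(" %s", power_flags[i]);
			else
				printf(" [%d]", i);	/* bit with no name */
		}
	}
	printf("\n");
	return 0;
}

On the example mask this prints "power management: ts vid [9]": bits 0 and 2 have names, while bit 9 falls back to its numeric index.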
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 9bc3b04421cd..fd3823a18c0b 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -94,7 +94,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has(c, i) && x86_cap_flags[i] != NULL) seq_printf(m, " %s", x86_cap_flags[i]); - for (i = 0; i < 32; i++) + seq_printf(m, "\nbogomips\t: %lu.%02lu\n", + c->loops_per_jiffy/(500000/HZ), + (c->loops_per_jiffy/(5000/HZ)) % 100); + seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); + + seq_printf(m, "power management:"); + for (i = 0; i < 32; i++) { if (c->x86_power & (1 << i)) { if (i < ARRAY_SIZE(x86_power_flags) && x86_power_flags[i]) @@ -104,11 +110,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) else seq_printf(m, " [%d]", i); } + } - seq_printf(m, "\nbogomips\t: %lu.%02lu\n", - c->loops_per_jiffy/(500000/HZ), - (c->loops_per_jiffy/(5000/HZ)) % 100); - seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size); + seq_printf(m, "\n\n"); return 0; } -- cgit v1.2.1 From 2aef77204e1e3a8ed6345727afbcb2c1efdf7fc0 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 20 Feb 2008 10:48:02 -0800 Subject: x86: cosmetic unification cpu/proc|_64.c make cpu/proc.c and cpu/proc_64.c same. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 36 +++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/proc_64.c | 49 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index fd3823a18c0b..15043a335ef1 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -8,6 +8,7 @@ /* * Get CPU information for use by the procfs. */ +#ifdef CONFIG_X86_32 static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, unsigned int cpu) { @@ -47,6 +48,31 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) c->cpuid_level, c->wp_works_ok ? 
"yes" : "no"); } +#else +static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + if (c->x86_max_cores * smp_num_siblings > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +#endif +} + +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + seq_printf(m, + "fpu\t\t: yes\n" + "fpu_exception\t: yes\n" + "cpuid level\t: %d\n" + "wp\t\t: yes\n", + c->cpuid_level); +} +#endif static int show_cpuinfo(struct seq_file *m, void *v) { @@ -97,7 +123,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "\nbogomips\t: %lu.%02lu\n", c->loops_per_jiffy/(500000/HZ), (c->loops_per_jiffy/(5000/HZ)) % 100); + +#ifdef CONFIG_X86_64 + if (c->x86_tlbsize > 0) + seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); +#endif seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); +#ifdef CONFIG_X86_64 + seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); + seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", + c->x86_phys_bits, c->x86_virt_bits); +#endif seq_printf(m, "power management:"); for (i = 0; i < 32; i++) { diff --git a/arch/x86/kernel/cpu/proc_64.c b/arch/x86/kernel/cpu/proc_64.c index ce1b08f96820..15043a335ef1 100644 --- a/arch/x86/kernel/cpu/proc_64.c +++ b/arch/x86/kernel/cpu/proc_64.c @@ -8,6 +8,47 @@ /* * Get CPU information for use by the procfs. */ +#ifdef CONFIG_X86_32 +static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, + unsigned int cpu) +{ +#ifdef CONFIG_X86_HT + if (c->x86_max_cores * smp_num_siblings > 1) { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpus_weight(per_cpu(cpu_core_map, cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } +#endif +} + +static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) +{ + /* + * We use exception 16 if we have hardware math and we've either seen + * it or the CPU claims it is internal + */ + int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); + seq_printf(m, + "fdiv_bug\t: %s\n" + "hlt_bug\t\t: %s\n" + "f00f_bug\t: %s\n" + "coma_bug\t: %s\n" + "fpu\t\t: %s\n" + "fpu_exception\t: %s\n" + "cpuid level\t: %d\n" + "wp\t\t: %s\n", + c->fdiv_bug ? "yes" : "no", + c->hlt_works_ok ? "no" : "yes", + c->f00f_bug ? "yes" : "no", + c->coma_bug ? "yes" : "no", + c->hard_math ? "yes" : "no", + fpu_exception ? "yes" : "no", + c->cpuid_level, + c->wp_works_ok ? 
"yes" : "no"); +} +#else static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, unsigned int cpu) { @@ -31,6 +72,7 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) "wp\t\t: yes\n", c->cpuid_level); } +#endif static int show_cpuinfo(struct seq_file *m, void *v) { @@ -82,13 +124,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) c->loops_per_jiffy/(500000/HZ), (c->loops_per_jiffy/(5000/HZ)) % 100); +#ifdef CONFIG_X86_64 if (c->x86_tlbsize > 0) seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); - seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size); +#endif + seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); +#ifdef CONFIG_X86_64 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); - seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", c->x86_phys_bits, c->x86_virt_bits); +#endif seq_printf(m, "power management:"); for (i = 0; i < 32; i++) { -- cgit v1.2.1 From eb19067d160416cd61fc92a8913ccfb3497b20b7 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 20 Feb 2008 10:48:55 -0800 Subject: x86: unify cpu/proc|_64.c Now cpu/proc.c and cpu/proc_64.c are same. So cpu/proc_64.c can be removed. Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/Makefile | 5 +- arch/x86/kernel/cpu/proc_64.c | 180 ------------------------------------------ 2 files changed, 2 insertions(+), 183 deletions(-) delete mode 100644 arch/x86/kernel/cpu/proc_64.c diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 8ba7d281fbc2..ee7c45235e54 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -3,9 +3,9 @@ # obj-y := intel_cacheinfo.o addon_cpuid_features.o -obj-y += feature_names.o +obj-y += proc.o feature_names.o -obj-$(CONFIG_X86_32) += common.o proc.o bugs.o +obj-$(CONFIG_X86_32) += common.o bugs.o obj-$(CONFIG_X86_32) += amd.o obj-$(CONFIG_X86_32) += cyrix.o obj-$(CONFIG_X86_32) += centaur.o @@ -13,7 +13,6 @@ obj-$(CONFIG_X86_32) += transmeta.o obj-$(CONFIG_X86_32) += intel.o obj-$(CONFIG_X86_32) += nexgen.o obj-$(CONFIG_X86_32) += umc.o -obj-$(CONFIG_X86_64) += proc_64.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ diff --git a/arch/x86/kernel/cpu/proc_64.c b/arch/x86/kernel/cpu/proc_64.c deleted file mode 100644 index 15043a335ef1..000000000000 --- a/arch/x86/kernel/cpu/proc_64.c +++ /dev/null @@ -1,180 +0,0 @@ -#include -#include -#include -#include -#include -#include - -/* - * Get CPU information for use by the procfs. - */ -#ifdef CONFIG_X86_32 -static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) -{ -#ifdef CONFIG_X86_HT - if (c->x86_max_cores * smp_num_siblings > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -#endif -} - -static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) -{ - /* - * We use exception 16 if we have hardware math and we've either seen - * it or the CPU claims it is internal - */ - int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); - seq_printf(m, - "fdiv_bug\t: %s\n" - "hlt_bug\t\t: %s\n" - "f00f_bug\t: %s\n" - "coma_bug\t: %s\n" - "fpu\t\t: %s\n" - "fpu_exception\t: %s\n" - "cpuid level\t: %d\n" - "wp\t\t: %s\n", - c->fdiv_bug ? "yes" : "no", - c->hlt_works_ok ? "no" : "yes", - c->f00f_bug ? 
"yes" : "no", - c->coma_bug ? "yes" : "no", - c->hard_math ? "yes" : "no", - fpu_exception ? "yes" : "no", - c->cpuid_level, - c->wp_works_ok ? "yes" : "no"); -} -#else -static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) -{ -#ifdef CONFIG_SMP - if (c->x86_max_cores * smp_num_siblings > 1) { - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpus_weight(per_cpu(cpu_core_map, cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); - } -#endif -} - -static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) -{ - seq_printf(m, - "fpu\t\t: yes\n" - "fpu_exception\t: yes\n" - "cpuid level\t: %d\n" - "wp\t\t: yes\n", - c->cpuid_level); -} -#endif - -static int show_cpuinfo(struct seq_file *m, void *v) -{ - struct cpuinfo_x86 *c = v; - unsigned int cpu = 0; - int i; - -#ifdef CONFIG_SMP - cpu = c->cpu_index; -#endif - seq_printf(m, "processor\t: %u\n" - "vendor_id\t: %s\n" - "cpu family\t: %d\n" - "model\t\t: %u\n" - "model name\t: %s\n", - cpu, - c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", - c->x86, - c->x86_model, - c->x86_model_id[0] ? c->x86_model_id : "unknown"); - - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); - else - seq_printf(m, "stepping\t: unknown\n"); - - if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get(cpu); - - if (!freq) - freq = cpu_khz; - seq_printf(m, "cpu MHz\t\t: %u.%03u\n", - freq / 1000, (freq % 1000)); - } - - /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); - - show_cpuinfo_core(m, c, cpu); - show_cpuinfo_misc(m, c); - - seq_printf(m, "flags\t\t:"); - for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); - - seq_printf(m, "\nbogomips\t: %lu.%02lu\n", - c->loops_per_jiffy/(500000/HZ), - (c->loops_per_jiffy/(5000/HZ)) % 100); - -#ifdef CONFIG_X86_64 - if (c->x86_tlbsize > 0) - seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); -#endif - seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); -#ifdef CONFIG_X86_64 - seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); - seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", - c->x86_phys_bits, c->x86_virt_bits); -#endif - - seq_printf(m, "power management:"); - for (i = 0; i < 32; i++) { - if (c->x86_power & (1 << i)) { - if (i < ARRAY_SIZE(x86_power_flags) && - x86_power_flags[i]) - seq_printf(m, "%s%s", - x86_power_flags[i][0]?" 
":"", - x86_power_flags[i]); - else - seq_printf(m, " [%d]", i); - } - } - - seq_printf(m, "\n\n"); - - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - if (*pos == 0) /* just in case, cpu 0 is not the first */ - *pos = first_cpu(cpu_online_map); - if ((*pos) < NR_CPUS && cpu_online(*pos)) - return &cpu_data(*pos); - return NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - *pos = next_cpu(*pos, cpu_online_map); - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -const struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; -- cgit v1.2.1 From 4d46a89e7c867718020b2d5fd8f9e775293304be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 04:24:40 +0100 Subject: x86: clean up include/asm-x86/processor.h basic style cleanup to flush out years of neglect: - consistent indentation - whitespace fixes - consistent comments Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 638 +++++++++++++++++++++++++------------------- 1 file changed, 358 insertions(+), 280 deletions(-) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 45a2f0ab33d0..43d2cc829a94 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -24,6 +24,7 @@ struct mm_struct; #include #include #include + #include #include #include @@ -37,16 +38,18 @@ struct mm_struct; static inline void *current_text_addr(void) { void *pc; - asm volatile("mov $1f,%0\n1:":"=r" (pc)); + + asm volatile("mov $1f, %0; 1:":"=r" (pc)); + return pc; } #ifdef CONFIG_X86_VSMP -#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) -#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) +# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) +# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) #else -#define ARCH_MIN_TASKALIGN 16 -#define ARCH_MIN_MMSTRUCT_ALIGN 0 +# define ARCH_MIN_TASKALIGN 16 +# define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif /* @@ -56,69 +59,81 @@ static inline void *current_text_addr(void) */ struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; + __u8 x86; /* CPU family */ + __u8 x86_vendor; /* CPU vendor */ + __u8 x86_model; + __u8 x86_mask; #ifdef CONFIG_X86_32 - char wp_works_ok; /* It doesn't on 386's */ - char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ - char hard_math; - char rfu; - char fdiv_bug; - char f00f_bug; - char coma_bug; - char pad0; + char wp_works_ok; /* It doesn't on 386's */ + + /* Problems on some 486Dx4's and old 386's: */ + char hlt_works_ok; + char hard_math; + char rfu; + char fdiv_bug; + char f00f_bug; + char coma_bug; + char pad0; #else - /* number of 4K pages in DTLB/ITLB combined(in pages)*/ - int x86_tlbsize; - __u8 x86_virt_bits, x86_phys_bits; - /* cpuid returned core id bits */ - __u8 x86_coreid_bits; - /* Max extended CPUID function supported */ - __u32 extended_cpuid_level; + /* Number of 4K pages in DTLB/ITLB combined(in pages): */ + int x86_tlbsize; + __u8 x86_virt_bits; + __u8 x86_phys_bits; + /* CPUID returned core id bits: */ + __u8 x86_coreid_bits; + /* Max extended CPUID function supported: */ + __u32 extended_cpuid_level; #endif - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - __u32 x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB - valid for CPUS which support this - call */ - int x86_cache_alignment; /* 
In bytes */ - int x86_power; - unsigned long loops_per_jiffy; + /* Maximum supported CPUID level, -1=no CPUID: */ + int cpuid_level; + __u32 x86_capability[NCAPINTS]; + char x86_vendor_id[16]; + char x86_model_id[64]; + /* in KB - valid for CPUS which support this call: */ + int x86_cache_size; + int x86_cache_alignment; /* In bytes */ + int x86_power; + unsigned long loops_per_jiffy; #ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ + /* cpus sharing the last level cache: */ + cpumask_t llc_shared_map; #endif - u16 x86_max_cores; /* cpuid returned max cores value */ - u16 apicid; - u16 x86_clflush_size; + /* cpuid returned max cores value: */ + u16 x86_max_cores; + u16 apicid; + u16 x86_clflush_size; #ifdef CONFIG_SMP - u16 booted_cores; /* number of cores as seen by OS */ - u16 phys_proc_id; /* Physical processor id. */ - u16 cpu_core_id; /* Core id */ - u16 cpu_index; /* index into per_cpu list */ + /* number of cores as seen by the OS: */ + u16 booted_cores; + /* Physical processor id: */ + u16 phys_proc_id; + /* Core id: */ + u16 cpu_core_id; + /* Index into per_cpu list: */ + u16 cpu_index; #endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); -#define X86_VENDOR_INTEL 0 -#define X86_VENDOR_CYRIX 1 -#define X86_VENDOR_AMD 2 -#define X86_VENDOR_UMC 3 -#define X86_VENDOR_NEXGEN 4 -#define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_TRANSMETA 7 -#define X86_VENDOR_NSC 8 -#define X86_VENDOR_NUM 9 -#define X86_VENDOR_UNKNOWN 0xff +#define X86_VENDOR_INTEL 0 +#define X86_VENDOR_CYRIX 1 +#define X86_VENDOR_AMD 2 +#define X86_VENDOR_UMC 3 +#define X86_VENDOR_NEXGEN 4 +#define X86_VENDOR_CENTAUR 5 +#define X86_VENDOR_TRANSMETA 7 +#define X86_VENDOR_NSC 8 +#define X86_VENDOR_NUM 9 + +#define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ -extern struct cpuinfo_x86 boot_cpu_data; -extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -extern __u32 cleared_cpu_caps[NCAPINTS]; +extern struct cpuinfo_x86 boot_cpu_data; +extern struct cpuinfo_x86 new_cpu_data; + +extern struct tss_struct doublefault_tss; +extern __u32 cleared_cpu_caps[NCAPINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); @@ -129,7 +144,9 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); #define current_cpu_data boot_cpu_data #endif -void cpu_detect(struct cpuinfo_x86 *c); +#define cache_line_size() (boot_cpu_data.x86_cache_alignment) + +extern void cpu_detect(struct cpuinfo_x86 *c); extern void identify_cpu(struct cpuinfo_x86 *); extern void identify_boot_cpu(void); @@ -146,7 +163,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) + unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. */ __asm__("cpuid" @@ -165,54 +182,67 @@ static inline void load_cr3(pgd_t *pgdir) #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. 
*/ struct x86_hw_tss { - unsigned short back_link, __blh; - unsigned long sp0; - unsigned short ss0, __ss0h; - unsigned long sp1; - unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ - unsigned long sp2; - unsigned short ss2, __ss2h; - unsigned long __cr3; - unsigned long ip; - unsigned long flags; - unsigned long ax, cx, dx, bx; - unsigned long sp, bp, si, di; - unsigned short es, __esh; - unsigned short cs, __csh; - unsigned short ss, __ssh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; - unsigned short ldt, __ldth; - unsigned short trace, io_bitmap_base; + unsigned short back_link, __blh; + unsigned long sp0; + unsigned short ss0, __ss0h; + unsigned long sp1; + /* ss1 caches MSR_IA32_SYSENTER_CS: */ + unsigned short ss1, __ss1h; + unsigned long sp2; + unsigned short ss2, __ss2h; + unsigned long __cr3; + unsigned long ip; + unsigned long flags; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long bx; + unsigned long sp; + unsigned long bp; + unsigned long si; + unsigned long di; + unsigned short es, __esh; + unsigned short cs, __csh; + unsigned short ss, __ssh; + unsigned short ds, __dsh; + unsigned short fs, __fsh; + unsigned short gs, __gsh; + unsigned short ldt, __ldth; + unsigned short trace; + unsigned short io_bitmap_base; + } __attribute__((packed)); #else struct x86_hw_tss { - u32 reserved1; - u64 sp0; - u64 sp1; - u64 sp2; - u64 reserved2; - u64 ist[7]; - u32 reserved3; - u32 reserved4; - u16 reserved5; - u16 io_bitmap_base; + u32 reserved1; + u64 sp0; + u64 sp1; + u64 sp2; + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; + } __attribute__((packed)) ____cacheline_aligned; #endif /* - * Size of io_bitmap. + * IO-bitmap sizes: */ -#define IO_BITMAP_BITS 65536 -#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) -#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) -#define INVALID_IO_BITMAP_OFFSET 0x8000 -#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 +#define IO_BITMAP_BITS 65536 +#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) +#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) +#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define INVALID_IO_BITMAP_OFFSET 0x8000 +#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct tss_struct { - struct x86_hw_tss x86_tss; + /* + * The hardware state: + */ + struct x86_hw_tss x86_tss; /* * The extra 1 is there because the CPU will access an @@ -220,48 +250,54 @@ struct tss_struct { * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ - unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; + unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ - unsigned long io_bitmap_max; - struct thread_struct *io_bitmap_owner; + unsigned long io_bitmap_max; + struct thread_struct *io_bitmap_owner; + /* - * pads the TSS to be cacheline-aligned (size is 0x100) + * Pad the TSS to be cacheline-aligned (size is 0x100): */ - unsigned long __cacheline_filler[35]; + unsigned long __cacheline_filler[35]; /* - * .. and then another 0x100 bytes for emergency kernel stack + * .. 
and then another 0x100 bytes for the emergency kernel stack: */ - unsigned long stack[64]; + unsigned long stack[64]; + } __attribute__((packed)); DECLARE_PER_CPU(struct tss_struct, init_tss); -/* Save the original ist values for checking stack pointers during debugging */ +/* + * Save the original ist values for checking stack pointers during debugging + */ struct orig_ist { - unsigned long ist[7]; + unsigned long ist[7]; }; #define MXCSR_DEFAULT 0x1f80 struct i387_fsave_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - u32 status; /* software status information */ + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + /* Software status information: */ + u32 status; }; struct i387_fxsave_struct { - u16 cwd; - u16 swd; - u16 twd; - u16 fop; + u16 cwd; + u16 swd; + u16 twd; + u16 fop; union { struct { u64 rip; @@ -274,31 +310,40 @@ struct i387_fxsave_struct { u32 fos; }; }; - u32 mxcsr; - u32 mxcsr_mask; - u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ - u32 padding[24]; + u32 mxcsr; + u32 mxcsr_mask; + /* 8*16 bytes for each FP-reg = 128 bytes: */ + u32 st_space[32]; + /* 16*16 bytes for each XMM-reg = 256 bytes: */ + u32 xmm_space[64]; + u32 padding[24]; + } __attribute__((aligned(16))); struct i387_soft_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - u8 ftop, changed, lookahead, no_update, rm, alimit; - struct info *info; - u32 entry_eip; + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct info *info; + u32 entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; - struct i387_soft_struct soft; + struct i387_soft_struct soft; }; #ifdef CONFIG_X86_32 @@ -313,42 +358,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; struct thread_struct { -/* cached TLS descriptors. 
*/ - struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - unsigned long sp0; - unsigned long sp; + /* Cached TLS descriptors: */ + struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + unsigned long sp0; + unsigned long sp; #ifdef CONFIG_X86_32 - unsigned long sysenter_cs; + unsigned long sysenter_cs; #else - unsigned long usersp; /* Copy from PDA */ - unsigned short es, ds, fsindex, gsindex; + unsigned long usersp; /* Copy from PDA */ + unsigned short es; + unsigned short ds; + unsigned short fsindex; + unsigned short gsindex; #endif - unsigned long ip; - unsigned long fs; - unsigned long gs; -/* Hardware debugging registers */ - unsigned long debugreg0; - unsigned long debugreg1; - unsigned long debugreg2; - unsigned long debugreg3; - unsigned long debugreg6; - unsigned long debugreg7; -/* fault info */ - unsigned long cr2, trap_no, error_code; -/* floating point info */ + unsigned long ip; + unsigned long fs; + unsigned long gs; + /* Hardware debugging registers: */ + unsigned long debugreg0; + unsigned long debugreg1; + unsigned long debugreg2; + unsigned long debugreg3; + unsigned long debugreg6; + unsigned long debugreg7; + /* Fault info: */ + unsigned long cr2; + unsigned long trap_no; + unsigned long error_code; + /* Floating point info: */ union i387_union i387 __attribute__((aligned(16)));; #ifdef CONFIG_X86_32 -/* virtual 86 mode info */ + /* Virtual 86 mode info */ struct vm86_struct __user *vm86_info; unsigned long screen_bitmap; - unsigned long v86flags, v86mask, saved_sp0; - unsigned int saved_fs, saved_gs; + unsigned long v86flags; + unsigned long v86mask; + unsigned long saved_sp0; + unsigned int saved_fs; + unsigned int saved_gs; #endif -/* IO permissions */ - unsigned long *io_bitmap_ptr; - unsigned long iopl; -/* max allowed port in the bitmap, in bytes: */ - unsigned io_bitmap_max; + /* IO permissions: */ + unsigned long *io_bitmap_ptr; + unsigned long iopl; + /* Max allowed port in the bitmap, in bytes: */ + unsigned io_bitmap_max; /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ unsigned long debugctlmsr; /* Debug Store - if not 0 points to a DS Save Area configuration; @@ -358,7 +411,7 @@ struct thread_struct { static inline unsigned long native_get_debugreg(int regno) { - unsigned long val = 0; /* Damn you, gcc! */ + unsigned long val = 0; /* Damn you, gcc! 
*/ switch (regno) { case 0: @@ -383,22 +436,22 @@ static inline void native_set_debugreg(int regno, unsigned long value) { switch (regno) { case 0: - asm("mov %0,%%db0" : /* no output */ :"r" (value)); + asm("mov %0, %%db0" ::"r" (value)); break; case 1: - asm("mov %0,%%db1" : /* no output */ :"r" (value)); + asm("mov %0, %%db1" ::"r" (value)); break; case 2: - asm("mov %0,%%db2" : /* no output */ :"r" (value)); + asm("mov %0, %%db2" ::"r" (value)); break; case 3: - asm("mov %0,%%db3" : /* no output */ :"r" (value)); + asm("mov %0, %%db3" ::"r" (value)); break; case 6: - asm("mov %0,%%db6" : /* no output */ :"r" (value)); + asm("mov %0, %%db6" ::"r" (value)); break; case 7: - asm("mov %0,%%db7" : /* no output */ :"r" (value)); + asm("mov %0, %%db7" ::"r" (value)); break; default: BUG(); @@ -412,6 +465,7 @@ static inline void native_set_iopl_mask(unsigned mask) { #ifdef CONFIG_X86_32 unsigned int reg; + __asm__ __volatile__ ("pushfl;" "popl %0;" "andl %1, %0;" @@ -423,12 +477,12 @@ static inline void native_set_iopl_mask(unsigned mask) #endif } -static inline void native_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void +native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { tss->x86_tss.sp0 = thread->sp0; #ifdef CONFIG_X86_32 - /* Only happens when SEP is enabled, no need to test "SEP"arately */ + /* Only happens when SEP is enabled, no need to test "SEP"arately: */ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { tss->x86_tss.ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); @@ -446,8 +500,8 @@ static inline void native_swapgs(void) #ifdef CONFIG_PARAVIRT #include #else -#define __cpuid native_cpuid -#define paravirt_enabled() 0 +#define __cpuid native_cpuid +#define paravirt_enabled() 0 /* * These special macros can be used to get or set a debugging register @@ -457,8 +511,8 @@ static inline void native_swapgs(void) #define set_debugreg(value, register) \ native_set_debugreg(register, value) -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void +load_sp0(struct tss_struct *tss, struct thread_struct *thread) { native_load_sp0(tss, thread); } @@ -473,11 +527,12 @@ static inline void load_sp0(struct tss_struct *tss, * enable), so that any CPU's that boot up * after us can get the correct flags. 
*/ -extern unsigned long mmu_cr4_features; +extern unsigned long mmu_cr4_features; static inline void set_in_cr4(unsigned long mask) { unsigned cr4; + mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; @@ -487,6 +542,7 @@ static inline void set_in_cr4(unsigned long mask) static inline void clear_in_cr4(unsigned long mask) { unsigned cr4; + mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; @@ -494,42 +550,42 @@ static inline void clear_in_cr4(unsigned long mask) } struct microcode_header { - unsigned int hdrver; - unsigned int rev; - unsigned int date; - unsigned int sig; - unsigned int cksum; - unsigned int ldrver; - unsigned int pf; - unsigned int datasize; - unsigned int totalsize; - unsigned int reserved[3]; + unsigned int hdrver; + unsigned int rev; + unsigned int date; + unsigned int sig; + unsigned int cksum; + unsigned int ldrver; + unsigned int pf; + unsigned int datasize; + unsigned int totalsize; + unsigned int reserved[3]; }; struct microcode { - struct microcode_header hdr; - unsigned int bits[0]; + struct microcode_header hdr; + unsigned int bits[0]; }; -typedef struct microcode microcode_t; -typedef struct microcode_header microcode_header_t; +typedef struct microcode microcode_t; +typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { - unsigned int sig; - unsigned int pf; - unsigned int cksum; + unsigned int sig; + unsigned int pf; + unsigned int cksum; }; struct extended_sigtable { - unsigned int count; - unsigned int cksum; - unsigned int reserved[3]; + unsigned int count; + unsigned int cksum; + unsigned int reserved[3]; struct extended_signature sigs[0]; }; typedef struct { - unsigned long seg; + unsigned long seg; } mm_segment_t; @@ -541,7 +597,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ +/* Prepare to copy thread state - unlazy all lazy state */ extern void prepare_to_copy(struct task_struct *tsk); unsigned long get_wchan(struct task_struct *p); @@ -578,118 +634,131 @@ static inline unsigned int cpuid_eax(unsigned int op) unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return eax; } + static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return ebx; } + static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return ecx; } + static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, ebx, ecx, edx; cpuid(op, &eax, &ebx, &ecx, &edx); + return edx; } /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ static inline void rep_nop(void) { - __asm__ __volatile__("rep;nop": : :"memory"); + __asm__ __volatile__("rep; nop" ::: "memory"); } -/* Stop speculative execution */ +static inline void cpu_relax(void) +{ + rep_nop(); +} + +/* Stop speculative execution: */ static inline void sync_core(void) { int tmp; + asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory"); } -#define cpu_relax() rep_nop() - -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) +static inline void +__monitor(const void *eax, unsigned long ecx, unsigned long edx) { - /* "monitor %eax,%ecx,%edx;" */ + /* "monitor %eax, %ecx, %edx;" */ asm volatile( - ".byte 0x0f,0x01,0xc8;" - : :"a" (eax), "c" (ecx), "d"(edx)); + ".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { - /* "mwait %eax,%ecx;" */ + /* "mwait %eax, %ecx;" */ asm volatile( - ".byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); + ".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { - /* "mwait %eax,%ecx;" */ + /* "mwait %eax, %ecx;" */ asm volatile( - "sti; .byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); + "sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); -extern int force_mwait; +extern int force_mwait; extern void select_idle_routine(const struct cpuinfo_x86 *c); -extern unsigned long boot_option_idle_override; +extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); /* Defined in head.S */ -extern struct desc_ptr early_gdt_descr; +extern struct desc_ptr early_gdt_descr; extern void cpu_set_gdt(int); extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); -/* from system description table in BIOS. Mostly for MCA use, but - * others may find it useful. */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; +/* + * from system description table in BIOS. Mostly for MCA use, but + * others may find it useful: + */ +extern unsigned int machine_id; +extern unsigned int machine_submodel_id; +extern unsigned int BIOS_revision; -/* Boot loader type from the setup header */ -extern int bootloader_type; +/* Boot loader type from the setup header: */ +extern int bootloader_type; -extern char ignore_fpu_irq; -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) +extern char ignore_fpu_irq; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH #ifdef CONFIG_X86_32 -#define BASE_PREFETCH ASM_NOP4 -#define ARCH_HAS_PREFETCH +# define BASE_PREFETCH ASM_NOP4 +# define ARCH_HAS_PREFETCH #else -#define BASE_PREFETCH "prefetcht0 (%1)" +# define BASE_PREFETCH "prefetcht0 (%1)" #endif -/* Prefetch instructions for Pentium III and AMD Athlon */ -/* It's not worth to care about 3dnow! prefetches for the K6 - because they are microcoded there and very slow. - However we don't do prefetches for pre XP Athlons currently - That should be fixed. */ +/* + * Prefetch instructions for Pentium III (+) and AMD Athlon (+) + * + * It's not worth to care about 3dnow prefetches for the K6 + * because they are microcoded there and very slow. 
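+ *
+ * (Editor's sketch, with pos as a hypothetical list cursor: a common
+ *  use is prefetching one node ahead while walking a linked list,
+ *
+ *	prefetch(pos->next);
+ *
+ *  via the helper defined below.)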
+ */ static inline void prefetch(const void *x) { alternative_input(BASE_PREFETCH, @@ -698,8 +767,11 @@ static inline void prefetch(const void *x) "r" (x)); } -/* 3dnow! prefetch to get an exclusive cache line. Useful for - spinlocks to avoid one state transition in the cache coherency protocol. */ +/* + * 3dnow prefetch to get an exclusive cache line. + * Useful for spinlocks to avoid one state transition in the + * cache coherency protocol: + */ static inline void prefetchw(const void *x) { alternative_input(BASE_PREFETCH, @@ -708,21 +780,25 @@ static inline void prefetchw(const void *x) "r" (x)); } -#define spin_lock_prefetch(x) prefetchw(x) +static inline void spin_lock_prefetch(const void *x) +{ + prefetchw(x); +} + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). */ -#define TASK_SIZE (PAGE_OFFSET) -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - -#define INIT_THREAD { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .vm86_info = NULL, \ - .sysenter_cs = __KERNEL_CS, \ - .io_bitmap_ptr = NULL, \ - .fs = __KERNEL_PERCPU, \ +#define TASK_SIZE PAGE_OFFSET +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + +#define INIT_THREAD { \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ + .fs = __KERNEL_PERCPU, \ } /* @@ -731,26 +807,27 @@ static inline void prefetchw(const void *x) * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ -#define INIT_TSS { \ - .x86_tss = { \ +#define INIT_TSS { \ + .x86_tss = { \ .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ - }, \ - .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ + }, \ + .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ } -#define start_thread(regs, new_eip, new_esp) do { \ +#define start_thread(regs, new_eip, new_esp) \ +do { \ __asm__("movl %0,%%gs": :"r" (0)); \ - regs->fs = 0; \ + regs->fs = 0; \ set_fs(USER_DS); \ - regs->ds = __USER_DS; \ - regs->es = __USER_DS; \ - regs->ss = __USER_DS; \ - regs->cs = __USER_CS; \ - regs->ip = new_eip; \ - regs->sp = new_esp; \ + regs->ds = __USER_DS; \ + regs->es = __USER_DS; \ + regs->ss = __USER_DS; \ + regs->cs = __USER_CS; \ + regs->ip = new_eip; \ + regs->sp = new_esp; \ } while (0) @@ -780,24 +857,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); __regs__ - 1; \ }) -#define KSTK_ESP(task) (task_pt_regs(task)->sp) +#define KSTK_ESP(task) (task_pt_regs(task)->sp) #else /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE64 (0x800000000000UL - 4096) +#define TASK_SIZE64 (0x800000000000UL - 4096) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) +#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ + 0xc0000000 : 0xFFFFe000) -#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) -#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ - IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE64) +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? 
\ + IA32_PAGE_OFFSET : TASK_SIZE64) #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX TASK_SIZE64 @@ -813,12 +890,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define start_thread(regs, new_rip, new_rsp) do { \ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ load_gs_index(0); \ - (regs)->ip = (new_rip); \ - (regs)->sp = (new_rsp); \ + (regs)->ip = (new_rip); \ + (regs)->sp = (new_rsp); \ write_pda(oldrsp, (new_rsp)); \ - (regs)->cs = __USER_CS; \ - (regs)->ss = __USER_DS; \ - (regs)->flags = 0x200; \ + (regs)->cs = __USER_CS; \ + (regs)->ss = __USER_DS; \ + (regs)->flags = 0x200; \ set_fs(USER_DS); \ } while (0) @@ -826,17 +903,18 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ -#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) +#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) -#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) +#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ #endif /* CONFIG_X86_64 */ -/* This decides where the kernel will search for a free chunk of vm +/* + * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) -#define KSTK_EIP(task) (task_pt_regs(task)->ip) +#define KSTK_EIP(task) (task_pt_regs(task)->ip) #endif -- cgit v1.2.1 From 2c5847837fe76497934734330151f240f3e04925 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Thu, 21 Feb 2008 00:10:54 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/mcheck/p6.c Before: total: 16 errors, 13 warnings, 122 lines checked After: total: 0 errors, 0 warnings, 122 lines checked No code changed: arch/x86/kernel/cpu/mcheck/p6.o: text data bss dec hex filename 1082 0 8 1090 442 p6.o.before 1082 0 8 1090 442 p6.o.after md5: 4e283fbc1b68240f1724d9725007d379 p6.o.before.asm 4e283fbc1b68240f1724d9725007d379 p6.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/p6.c | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c index 74342604d30e..62efc9c2b3af 100644 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ b/arch/x86/kernel/cpu/mcheck/p6.c @@ -9,23 +9,23 @@ #include #include -#include +#include #include #include #include "mce.h" /* Machine Check Handler For PII/PIII */ -static void intel_machine_check(struct pt_regs * regs, long error_code) +static void intel_machine_check(struct pt_regs *regs, long error_code) { - int recover=1; + int recover = 1; u32 alow, ahigh, high, low; u32 mcgstl, mcgsth; int i; - rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth); + rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); if (mcgstl & (1<<0)) /* Recoverable ? 
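(editorial gloss: bit 0 of MSR_IA32_MCG_STATUS is RIPV, "restart IP valid")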
*/ - recover=0; + recover = 0; printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", smp_processor_id(), mcgsth, mcgstl); @@ -55,30 +55,30 @@ static void intel_machine_check(struct pt_regs * regs, long error_code) } if (recover & 2) - panic ("CPU context corrupt"); + panic("CPU context corrupt"); if (recover & 1) - panic ("Unable to continue"); + panic("Unable to continue"); - printk (KERN_EMERG "Attempting to continue.\n"); - /* - * Do not clear the MSR_IA32_MCi_STATUS if the error is not + printk(KERN_EMERG "Attempting to continue.\n"); + /* + * Do not clear the MSR_IA32_MCi_STATUS if the error is not * recoverable/continuable.This will allow BIOS to look at the MSRs * for errors if the OS could not log the error. */ - for (i=0; i Date: Thu, 21 Feb 2008 00:18:34 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/umc.c Before: total: 3 errors, 1 warnings, 23 lines checked After: total: 0 errors, 0 warnings, 25 lines checked No code changed: arch/x86/kernel/cpu/umc.o: text data bss dec hex filename 24 616 0 640 280 umc.o.before 24 616 0 640 280 umc.o.after md5: e8daa3eaed0963a0cdd2e83c2e1f9823 umc.o.before.asm e8daa3eaed0963a0cdd2e83c2e1f9823 umc.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/umc.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index b1acf08245fb..b1fc90989d75 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c @@ -3,17 +3,19 @@ #include #include "cpu.h" -/* UMC chips appear to be only either 386 or 486, so no special init takes place. +/* + * UMC chips appear to be only either 386 or 486, + * so no special init takes place. */ static struct cpu_dev umc_cpu_dev __cpuinitdata = { .c_vendor = "UMC", - .c_ident = { "UMC UMC UMC" }, + .c_ident = { "UMC UMC UMC" }, .c_models = { { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = - { - [1] = "U5D", - [2] = "U5S", + { + [1] = "U5D", + [2] = "U5S", } }, }, -- cgit v1.2.1 From fd77c7cabd71ab0c31758f5faf1b92b66e9fe461 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Thu, 21 Feb 2008 00:19:10 +0100 Subject: x86: coding style fixes to arch/x86/boot/compressed/misc.c Fix lots of style errors and warnings. Before: total: 58 errors, 9 warnings, 469 lines checked After: total: 7 errors, 8 warnings, 471 lines checked No code changed: arch/x86/boot/compressed/misc.o: text data bss dec hex filename 10716 8 2152 12876 324c misc.o.before 10716 8 2152 12876 324c misc.o.after md5: 2c20c903986a3c9bca44306c6646067e misc.o.before.asm 2c20c903986a3c9bca44306c6646067e misc.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.c | 66 +++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 69aec2f4155d..50d054c29018 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -127,7 +127,8 @@ typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; -#define WSIZE 0x80000000 /* Window size must be at least 32k, +#define WSIZE 0x80000000 /* + * Window size must be at least 32k, * and a power of two * We don't actually have a window just * a huge output buffer so I report @@ -152,22 +153,22 @@ static unsigned outcnt; /* bytes in output buffer */ #define RESERVED 0xC0 /* bit 6,7: reserved */ #define get_byte() (inptr < insize ? 
inbuf[inptr++] : fill_inbuf()) - + /* Diagnostic functions */ #ifdef DEBUG -# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Assert(cond, msg) {if(!(cond)) error(msg); } # define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ;} -# define Tracevv(x) {if (verbose>1) fprintf x ;} -# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} -# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +# define Tracev(x) {if (verbose) fprintf x ; } +# define Tracevv(x) {if (verbose > 1) fprintf x ; } +# define Tracec(c, x) {if (verbose && (c)) fprintf x ; } +# define Tracecv(c, x) {if (verbose > 1 && (c)) fprintf x ; } #else -# define Assert(cond,msg) +# define Assert(cond, msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) +# define Tracec(c, x) +# define Tracecv(c, x) #endif static int fill_inbuf(void); @@ -175,7 +176,7 @@ static void flush_window(void); static void error(char *m); static void gzip_mark(void **); static void gzip_release(void **); - + /* * This is set up by the setup-routine at boot-time */ @@ -190,7 +191,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */ extern unsigned char input_data[]; extern int input_len; -static long bytes_out = 0; +static long bytes_out; static void *malloc(int size); static void free(void *where); @@ -229,8 +230,10 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error"); - if (free_mem_ptr <= 0) error("Memory error"); + if (size < 0) + error("Malloc error"); + if (free_mem_ptr <= 0) + error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -256,19 +259,19 @@ static void gzip_release(void **ptr) { free_mem_ptr = (memptr) *ptr; } - + static void scroll(void) { int i; - memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 ); - for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 ) + memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2); + for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2) vidmem[i] = ' '; } static void putstr(const char *s) { - int x,y,pos; + int x, y, pos; char c; #ifdef CONFIG_X86_32 @@ -279,18 +282,18 @@ static void putstr(const char *s) x = RM_SCREEN_INFO.orig_x; y = RM_SCREEN_INFO.orig_y; - while ( ( c = *s++ ) != '\0' ) { - if ( c == '\n' ) { + while ((c = *s++) != '\0') { + if (c == '\n') { x = 0; - if ( ++y >= lines ) { + if (++y >= lines) { scroll(); y--; } } else { vidmem [(x + cols * y) * 2] = c; - if ( ++x >= cols ) { + if (++x >= cols) { x = 0; - if ( ++y >= lines ) { + if (++y >= lines) { scroll(); y--; } @@ -308,22 +311,22 @@ static void putstr(const char *s) outb(0xff & (pos >> 1), vidport+1); } -static void* memset(void* s, int c, unsigned n) +static void *memset(void *s, int c, unsigned n) { int i; char *ss = s; - for (i=0;ip_type) { @@ -409,7 +411,7 @@ static void parse_elf(void *output) dest = output; dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); #else - dest = (void*)(phdr->p_paddr); + dest = (void *)(phdr->p_paddr); #endif memcpy(dest, output + phdr->p_offset, -- cgit v1.2.1 From 1180e01de50c0c7683c6648251f32957bc2d7850 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 05:03:48 +0100 Subject: x86: more cleanups in arch/x86/boot/compressed/misc.c Before: total: 7 errors, 8 warnings, 471 lines checked After: total: 5 errors, 5 warnings, 479 lines checked ( the rest cannot be eliminated due to zlib interface cruftiness. 
) No code changed: arch/x86/boot/compressed/misc.o: text data bss dec hex filename 10716 8 2152 12876 324c misc.o.before 10716 8 2152 12876 324c misc.o.after md5: 2c20c903986a3c9bca44306c6646067e misc.o.before.asm 2c20c903986a3c9bca44306c6646067e misc.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.c | 100 ++++++++++++++++++++++------------------ 1 file changed, 54 insertions(+), 46 deletions(-) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 50d054c29018..9470a050f8a4 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -58,8 +58,8 @@ * 1 bit (last block flag) * 2 bits (block type) * - * 1 block occurs every 32K -1 bytes or when there 50% compression has been achieved. - * The smallest block type encoding is always used. + * 1 block occurs every 32K -1 bytes or when there 50% compression + * has been achieved. The smallest block type encoding is always used. * * stored: * 32 bits length in bytes. @@ -95,9 +95,9 @@ * * All of which is enough to compute an amount of extra data that is required * to be safe. To avoid problems at the block level allocating 5 extra bytes - * per 32767 bytes of data is sufficient. To avoind problems internal to a block - * adding an extra 32767 bytes (the worst case uncompressed block size) is - * sufficient, to ensure that in the worst case the decompressed data for + * per 32767 bytes of data is sufficient. To avoind problems internal to a + * block adding an extra 32767 bytes (the worst case uncompressed block size) + * is sufficient, to ensure that in the worst case the decompressed data for * block will stop the byte before the compressed data for a block begins. * To avoid problems with the compressed data's meta information an extra 18 * bytes are needed. Leading to the formula: @@ -116,52 +116,59 @@ * gzip declarations */ -#define OF(args) args -#define STATIC static +#define OF(args) args +#define STATIC static #undef memset #undef memcpy -#define memzero(s, n) memset ((s), 0, (n)) +#define memzero(s, n) memset((s), 0, (n)) -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; -#define WSIZE 0x80000000 /* - * Window size must be at least 32k, - * and a power of two - * We don't actually have a window just - * a huge output buffer so I report - * a 2G windows size, as that should - * always be larger than our output buffer. - */ +/* + * Window size must be at least 32k, and a power of two. 
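+ * (Editorial aside: a power-of-two size lets wrap-around be a cheap
+ *  mask, e.g. pos & (WSIZE - 1), instead of a divide.)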
+ * We don't actually have a window just a huge output buffer, + * so we report a 2G window size, as that should always be + * larger than our output buffer: + */ +#define WSIZE 0x80000000 + +/* Input buffer: */ +static unsigned char *inbuf; + +/* Sliding window buffer (and final output buffer): */ +static unsigned char *window; + +/* Valid bytes in inbuf: */ +static unsigned insize; -static uch *inbuf; /* input buffer */ -static uch *window; /* Sliding window buffer, (and final output buffer) */ +/* Index of next byte to be processed in inbuf: */ +static unsigned inptr; -static unsigned insize; /* valid bytes in inbuf */ -static unsigned inptr; /* index of next byte to be processed in inbuf */ -static unsigned outcnt; /* bytes in output buffer */ +/* Bytes in output buffer: */ +static unsigned outcnt; /* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAM 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6, 7: reserved */ -#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) +#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) /* Diagnostic functions */ #ifdef DEBUG -# define Assert(cond, msg) {if(!(cond)) error(msg); } -# define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ; } -# define Tracevv(x) {if (verbose > 1) fprintf x ; } -# define Tracec(c, x) {if (verbose && (c)) fprintf x ; } -# define Tracecv(c, x) {if (verbose > 1 && (c)) fprintf x ; } +# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0) +# define Trace(x) do { fprintf x; } while (0) +# define Tracev(x) do { if (verbose) fprintf x ; } while (0) +# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0) +# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0) +# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0) #else # define Assert(cond, msg) # define Trace(x) @@ -349,9 +356,9 @@ static void flush_window(void) /* With my window equal to my output buffer * I only need to compute the crc here. 
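	 *
	 * (Editorial note: the loop below is the standard table-driven
	 *  CRC-32 byte update,
	 *
	 *	c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
	 *
	 *  so nothing is copied, only the running checksum advances.)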
*/ - ulg c = crc; /* temporary variable */ + unsigned long c = crc; /* temporary variable */ unsigned n; - uch *in, ch; + unsigned char *in, ch; in = window; for (n = 0; n < outcnt; n++) { @@ -359,7 +366,7 @@ static void flush_window(void) c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); } crc = c; - bytes_out += (ulg)outcnt; + bytes_out += (unsigned long)outcnt; outcnt = 0; } @@ -423,8 +430,9 @@ static void parse_elf(void *output) } asmlinkage void decompress_kernel(void *rmode, memptr heap, - uch *input_data, unsigned long input_len, - uch *output) + unsigned char *input_data, + unsigned long input_len, + unsigned char *output) { real_mode = rmode; @@ -447,12 +455,12 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, inptr = 0; #ifdef CONFIG_X86_64 - if ((ulg)output & (__KERNEL_ALIGN - 1)) + if ((unsigned long)output & (__KERNEL_ALIGN - 1)) error("Destination address not 2M aligned"); - if ((ulg)output >= 0xffffffffffUL) + if ((unsigned long)output >= 0xffffffffffUL) error("Destination address too large"); #else - if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1)) + if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1)) error("Destination address not CONFIG_PHYSICAL_ALIGN aligned"); if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) error("Destination address too large"); -- cgit v1.2.1 From 513ad84bf60d96a6998bca10ed07c3d340449be8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 21 Feb 2008 05:18:40 +0100 Subject: x86: de-macro start_thread() Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 15 +++++++++++++++ arch/x86/kernel/process_64.c | 15 +++++++++++++++ include/asm-x86/processor.h | 29 +++-------------------------- 3 files changed, 33 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2cd89b8a7050..9230ce060d09 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -512,6 +512,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, return err; } +void +start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) +{ + __asm__("movl %0, %%gs" :: "r"(0)); + regs->fs = 0; + set_fs(USER_DS); + regs->ds = __USER_DS; + regs->es = __USER_DS; + regs->ss = __USER_DS; + regs->cs = __USER_CS; + regs->ip = new_ip; + regs->sp = new_sp; +} +EXPORT_SYMBOL_GPL(start_thread); + #ifdef CONFIG_SECCOMP static void hard_disable_TSC(void) { diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 46c4c546b499..1ffce14cff6e 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -528,6 +528,21 @@ out: return err; } +void +start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) +{ + asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0)); + load_gs_index(0); + regs->ip = new_ip; + regs->sp = new_sp; + write_pda(oldrsp, new_sp); + regs->cs = __USER_CS; + regs->ss = __USER_DS; + regs->flags = 0x200; + set_fs(USER_DS); +} +EXPORT_SYMBOL_GPL(start_thread); + /* * This special macro can be used to load a debugging register */ diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 43d2cc829a94..9054734589fe 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -817,20 +817,6 @@ static inline void spin_lock_prefetch(const void *x) .io_bitmap = { [0 ... 
IO_BITMAP_LONGS] = ~0 }, \ } -#define start_thread(regs, new_eip, new_esp) \ -do { \ - __asm__("movl %0,%%gs": :"r" (0)); \ - regs->fs = 0; \ - set_fs(USER_DS); \ - regs->ds = __USER_DS; \ - regs->es = __USER_DS; \ - regs->ss = __USER_DS; \ - regs->cs = __USER_CS; \ - regs->ip = new_eip; \ - regs->sp = new_esp; \ -} while (0) - - extern unsigned long thread_saved_pc(struct task_struct *tsk); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) @@ -887,18 +873,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } -#define start_thread(regs, new_rip, new_rsp) do { \ - asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ - load_gs_index(0); \ - (regs)->ip = (new_rip); \ - (regs)->sp = (new_rsp); \ - write_pda(oldrsp, (new_rsp)); \ - (regs)->cs = __USER_CS; \ - (regs)->ss = __USER_DS; \ - (regs)->flags = 0x200; \ - set_fs(USER_DS); \ -} while (0) - /* * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. @@ -909,6 +883,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ #endif /* CONFIG_X86_64 */ +extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + unsigned long new_sp); + /* * This decides where the kernel will search for a free chunk of vm * space during mmap's. -- cgit v1.2.1 From 322850af8d93735f67b8ebf84bb1350639be3f34 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 23 Feb 2008 21:48:42 -0800 Subject: x86: make amd quad core 8 socket system not be clustered_box, #2 quad core 8 socket system will have apic id lifting.the apic id range could be [4, 0x23]. and apic_is_clustered_box will think that need to three clusters and that is large than 2. So it is treated as clustered_box. and will get Marking TSC unstable due to TSCs unsynchronized even the CPUs have X86_FEATURE_CONSTANT_TSC set. this patch will check if the cpu is from AMD. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index d8d03e09dea2..7d8ffdaa0ab3 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -1180,9 +1180,19 @@ __cpuinit int apic_is_clustered_box(void) { int i, clusters, zeros; unsigned id; - u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + u16 *bios_cpu_apicid; DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); + /* + * there is not this kind of box with AMD CPU yet. + * Some AMD box with quadcore cpu and 8 sockets apicid + * will be [4, 0x23] or [8, 0x27] could be thought to + * have three apic_clusters. So go out early. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return 0; + + bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; bitmap_zero(clustermap, NUM_APIC_CLUSTERS); for (i = 0; i < NR_CPUS; i++) { -- cgit v1.2.1 From 700efc1b9f6afe34caae231b87d129ad8ffb559f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 23 Feb 2008 09:58:20 +0100 Subject: x86: introduce kernel/head32.c Copy x86_64 and add a head32.c so we can start moving early architecture initialization out of assembly. [ Sam Ravnborg : updated it to x86 ] Signed-off-by: Eric W. Biederman Signed-off-by: Sam Ravnborg Cc: H. 
Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 6 +++--- arch/x86/kernel/Makefile | 3 +-- arch/x86/kernel/head32.c | 14 ++++++++++++++ arch/x86/kernel/head_32.S | 2 +- 4 files changed, 19 insertions(+), 6 deletions(-) create mode 100644 arch/x86/kernel/head32.c diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f1e739a43d41..03131ce1d526 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -159,9 +159,9 @@ KBUILD_AFLAGS += $(mflags-y) ### # Kernel objects -head-y := arch/x86/kernel/head_$(BITS).o -head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o -head-y += arch/x86/kernel/init_task.o +head-y := arch/x86/kernel/head_$(BITS).o +head-y += arch/x86/kernel/head$(BITS).o +head-y += arch/x86/kernel/init_task.o libs-y += arch/x86/lib/ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 80e6695a12a3..df10327182d4 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -2,8 +2,7 @@ # Makefile for the linux kernel. # -extra-y := head_$(BITS).o init_task.o vmlinux.lds -extra-$(CONFIG_X86_64) += head64.o +extra-y := head_$(BITS).o head$(BITS).o init_task.o vmlinux.lds CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c new file mode 100644 index 000000000000..3db059058927 --- /dev/null +++ b/arch/x86/kernel/head32.c @@ -0,0 +1,14 @@ +/* + * linux/arch/i386/kernel/head32.c -- prepare to run common code + * + * Copyright (C) 2000 Andrea Arcangeli SuSE + * Copyright (C) 2007 Eric Biederman + */ + +#include +#include + +void __init i386_start_kernel(void) +{ + start_kernel(); +} diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 74d87ea85b5c..826988a6e964 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -450,7 +450,7 @@ is386: movl $2,%ecx # set MP jmp initialize_secondary # all other CPUs call initialize_secondary 1: #endif /* CONFIG_SMP */ - jmp start_kernel + jmp i386_start_kernel /* * We depend on ET to be correct. This checks for 287/387. -- cgit v1.2.1 From 3def3d6ddf43dbe20c00c3cbc38dfacc8586998f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 22 Feb 2008 17:07:16 -0800 Subject: x86: clean up e820_reserve_resources on 64-bit e820_resource_resources could use insert_resource instead of request_resource also move code_resource, data_resource, bss_resource, and crashk_res out of e820_reserve_resources. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820_64.c | 19 ++----------------- arch/x86/kernel/setup_64.c | 8 +++++++- include/asm-x86/e820_64.h | 3 +-- 3 files changed, 10 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index a8694a35352b..8b914a833ac6 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -229,8 +229,7 @@ unsigned long __init e820_end_of_ram(void) /* * Mark e820 reserved areas as busy for the resource manager. 
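 *
 * (Editorial note: after this patch every map entry is registered via
 *
 *	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 *	insert_resource(&iomem_resource, res);
 *
 *  and insert_resource(), unlike request_resource(), copes with
 *  entries that overlap resources registered elsewhere.)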
*/ -void __init e820_reserve_resources(struct resource *code_resource, - struct resource *data_resource, struct resource *bss_resource) +void __init e820_reserve_resources(void) { int i; for (i = 0; i < e820.nr_map; i++) { @@ -245,21 +244,7 @@ void __init e820_reserve_resources(struct resource *code_resource, res->start = e820.map[i].addr; res->end = res->start + e820.map[i].size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - request_resource(&iomem_resource, res); - if (e820.map[i].type == E820_RAM) { - /* - * We don't know which RAM region contains kernel data, - * so we try it repeatedly and let the resource manager - * test it. - */ - request_resource(res, code_resource); - request_resource(res, data_resource); - request_resource(res, bss_resource); -#ifdef CONFIG_KEXEC - if (crashk_res.start != crashk_res.end) - request_resource(res, &crashk_res); -#endif - } + insert_resource(&iomem_resource, res); } } diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 187f084b9491..e3cb3ea96ca1 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -248,6 +248,7 @@ static void __init reserve_crashkernel(void) (unsigned long)(total_mem >> 20)); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); } } #else @@ -322,6 +323,11 @@ void __init setup_arch(char **cmdline_p) finish_e820_parsing(); + /* after parse_early_param, so could debug it */ + insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &data_resource); + insert_resource(&iomem_resource, &bss_resource); + early_gart_iommu_check(); e820_register_active_regions(0, 0, -1UL); @@ -454,7 +460,7 @@ void __init setup_arch(char **cmdline_p) /* * We trust e820 completely. No explicit ROM probing in memory. 
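 *
 * (Editorial note: the call below drops its three resource arguments
 *  because code/data/bss are now inserted earlier in setup_arch(),
 *  right after finish_e820_parsing().)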
*/ - e820_reserve_resources(&code_resource, &data_resource, &bss_resource); + e820_reserve_resources(); e820_mark_nosave_regions(); /* request I/O space for devices used on all i[345]86 PCs */ diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 9e06c6eb4e27..ef653a403e0b 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -23,8 +23,7 @@ extern void update_memory_range(u64 start, u64 size, unsigned old_type, extern void setup_memory_region(void); extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); -extern void e820_reserve_resources(struct resource *code_resource, - struct resource *data_resource, struct resource *bss_resource); +extern void e820_reserve_resources(void); extern void e820_mark_nosave_regions(void); extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); -- cgit v1.2.1 From 1e934dda0c77c8ad13fdda02074f2cfcea118a56 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 22 Feb 2008 13:37:26 -0800 Subject: x86: insert_resorce for lapic addr after e820_reserve_resources when comparing the e820 direct from BIOS, and the one by kexec: BIOS-provided physical RAM map: - BIOS-e820: 0000000000000000 - 0000000000097400 (usable) + BIOS-e820: 0000000000000100 - 0000000000097400 (usable) BIOS-e820: 0000000000097400 - 00000000000a0000 (reserved) BIOS-e820: 00000000000e6000 - 0000000000100000 (reserved) BIOS-e820: 0000000000100000 - 00000000dffa0000 (usable) - BIOS-e820: 00000000dffae000 - 00000000dffb0000 type 9 + BIOS-e820: 00000000dffae000 - 00000000dffb0000 (reserved) BIOS-e820: 00000000dffb0000 - 00000000dffbe000 (ACPI data) BIOS-e820: 00000000dffbe000 - 00000000dfff0000 (ACPI NVS) BIOS-e820: 00000000dfff0000 - 00000000e0000000 (reserved) BIOS-e820: 00000000fec00000 - 00000000fec01000 (reserved) - BIOS-e820: 00000000fee00000 - 00000000fee01000 (reserved) =======> that is the local apic address... somewhere we lost it BIOS-e820: 00000000ff700000 - 0000000100000000 (reserved) BIOS-e820: 0000000100000000 - 0000004020000000 (usable) found one entry about reserved is missing for the kernel by kexec. it turns out init_apic_mappings is called before e820_reserve_resources in setup_arch. but e820_reserve_resources is using request_resource. it will not handle the conflicts. there are three ways to fix it: 1. change request_resource in e820_reserve_resources to to insert_resource 2. move init_apic_mappings after e820_reserve_resources 3. use late_initcall to insert lapic resource. this patch is using method 3, that is less intrusive. in later version could consider to use method 1. before patch fed20000-ffffffff : PCI Bus #00 fee00000-fee00fff : Local APIC fefff000-feffffff : pnp 00:09 ff700000-ffffffff : reserved with patch will get map in first kernel fed20000-ffffffff : PCI Bus #00 fee00000-fee00fff : Local APIC fee00000-fee00fff : reserved fefff000-feffffff : pnp 00:09 ff700000-ffffffff : reserved Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 7d8ffdaa0ab3..ac2405ed504d 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -881,11 +881,6 @@ void __init init_apic_mappings(void) apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys); - /* Put local APIC into the resource map. 
*/ - lapic_resource.start = apic_phys; - lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; - insert_resource(&iomem_resource, &lapic_resource); - /* * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). @@ -1300,3 +1295,21 @@ static __init int setup_apicpmtimer(char *s) } __setup("apicpmtimer", setup_apicpmtimer); +static int __init lapic_insert_resource(void) +{ + if (!apic_phys) + return -1; + + /* Put local APIC into the resource map. */ + lapic_resource.start = apic_phys; + lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; + insert_resource(&iomem_resource, &lapic_resource); + + return 0; +} + +/* + * need call insert after e820_reserve_resources() + * that is using request_resource + */ +late_initcall(lapic_insert_resource); -- cgit v1.2.1 From 023196a3be708657d828bb139343bceb2c1c9649 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:09:26 +0100 Subject: x86: coding style fix to arch/x86/boot/pm.c Before: total: 1 errors, 0 warnings, 178 lines checked After: total: 0 errors, 0 warnings, 178 lines checked No code changed: arch/x86/boot/pm.o: text data bss dec hex filename 351 0 6 357 165 pm.o.before 351 0 6 357 165 pm.o.after md5: 81de3616bceb29691bf835bb62a84ff1 pm.o.before.asm 81de3616bceb29691bf835bb62a84ff1 pm.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/boot/pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c index 1a0f936c160b..a93cb8bded4d 100644 --- a/arch/x86/boot/pm.c +++ b/arch/x86/boot/pm.c @@ -100,7 +100,7 @@ static void reset_coprocessor(void) /* * Set up the GDT */ -#define GDT_ENTRY(flags,base,limit) \ +#define GDT_ENTRY(flags, base, limit) \ (((u64)(base & 0xff000000) << 32) | \ ((u64)flags << 40) | \ ((u64)(limit & 0x00ff0000) << 32) | \ -- cgit v1.2.1 From 60e11746d97c099b305e25e587731148387d02eb Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:09:34 +0100 Subject: x86: coding style fixes to arch/x86/kernel/summit_32.c Before: total: 20 errors, 17 warnings, 180 lines checked After: total: 0 errors, 17 warnings, 183 lines checked No code changed: arch/x86/kernel/summit_32.o: text data bss dec hex filename 932 192 0 1124 464 summit_32.o.before 932 192 0 1124 464 summit_32.o.after md5: 217aa5f002f217e56ef9d8e5c74b60e0 summit_32.o.before.asm 217aa5f002f217e56ef9d8e5c74b60e0 summit_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/summit_32.c | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 72f463401592..c7b579db843d 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -40,38 +40,40 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) int twister = 0, node = 0; int i, bus, num_buses; - for(i = 0; i < rio_table_hdr->num_rio_dev; i++){ - if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id){ + for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { + if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) { twister = rio_devs[i]->owner_id; break; } } - if (i == rio_table_hdr->num_rio_dev){ + if (i == rio_table_hdr->num_rio_dev) { printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __FUNCTION__); return last_bus; } - for(i = 0; i < rio_table_hdr->num_scal_dev; i++){ - if (scal_devs[i]->node_id == twister){ + for (i = 0; i < 
rio_table_hdr->num_scal_dev; i++) { + if (scal_devs[i]->node_id == twister) { node = scal_devs[i]->node_id; break; } } - if (i == rio_table_hdr->num_scal_dev){ + if (i == rio_table_hdr->num_scal_dev) { printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __FUNCTION__); return last_bus; } - switch (rio_devs[wpeg_num]->type){ + switch (rio_devs[wpeg_num]->type) { case CompatWPEG: - /* The Compatibility Winnipeg controls the 2 legacy buses, + /* + * The Compatibility Winnipeg controls the 2 legacy buses, * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case * a PCI-PCI bridge card is used in either slot: total 5 buses. */ num_buses = 5; break; case AltWPEG: - /* The Alternate Winnipeg controls the 2 133MHz buses [1 slot + /* + * The Alternate Winnipeg controls the 2 133MHz buses [1 slot * each], their 2 "extra" buses, the 100MHz bus [2 slots] and * the "extra" buses for each of those slots: total 7 buses. */ @@ -79,7 +81,8 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) break; case LookOutAWPEG: case LookOutBWPEG: - /* A Lookout Winnipeg controls 3 100MHz buses [2 slots each] + /* + * A Lookout Winnipeg controls 3 100MHz buses [2 slots each] * & the "extra" buses for each of those slots: total 9 buses. */ num_buses = 9; @@ -89,7 +92,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) return last_bus; } - for(bus = last_bus; bus < last_bus + num_buses; bus++) + for (bus = last_bus; bus < last_bus + num_buses; bus++) mp_bus_id_to_node[bus] = node; return bus; } @@ -99,12 +102,12 @@ static int __init build_detail_arrays(void) unsigned long ptr; int i, scal_detail_size, rio_detail_size; - if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){ + if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __FUNCTION__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); return 0; } - switch (rio_table_hdr->version){ + switch (rio_table_hdr->version) { default: printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __FUNCTION__, rio_table_hdr->version); return 0; @@ -119,10 +122,10 @@ static int __init build_detail_arrays(void) } ptr = (unsigned long)rio_table_hdr + 3; - for(i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) + for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) scal_devs[i] = (struct scal_detail *)ptr; - for(i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) + for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) rio_devs[i] = (struct rio_detail *)ptr; return 1; @@ -140,9 +143,9 @@ void __init setup_summit(void) rio_table_hdr = NULL; offset = 0x180; - while (offset){ + while (offset) { /* The block id is stored in the 2nd word */ - if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){ + if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) { /* set the pointer past the offset & block id */ rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); break; @@ -150,7 +153,7 @@ void __init setup_summit(void) /* The next offset is stored in the 1st word. 
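(editorial gloss: i.e. offset = *((unsigned short *)(ptr + offset)), as read below.)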
0 means no more */ offset = *((unsigned short *)(ptr + offset)); } - if (!rio_table_hdr){ + if (!rio_table_hdr) { printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __FUNCTION__); return; } @@ -161,8 +164,8 @@ void __init setup_summit(void) /* The first Winnipeg we're looking for has an index of 0 */ next_wpeg = 0; do { - for(i = 0; i < rio_table_hdr->num_rio_dev; i++){ - if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg){ + for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { + if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) { /* It's the Winnipeg we're looking for! */ next_bus = setup_pci_node_map_for_wpeg(i, next_bus); next_wpeg++; -- cgit v1.2.1 From 65eb6b4326daddd1cccd003bd4df3fd75b06f0e1 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:09:42 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/intel.c Before: total: 37 errors, 16 warnings, 366 lines checked After: total: 0 errors, 15 warnings, 369 lines checked No code changed: arch/x86/kernel/cpu/intel.o: text data bss dec hex filename 1534 452 0 1986 7c2 intel.o.before 1534 452 0 1986 7c2 intel.o.after md5: 1ca348a06de6eb354c4b6ea715a57db5 intel.o.before.asm 1ca348a06de6eb354c4b6ea715a57db5 intel.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 83 +++++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 34468b2e2507..c9ecf378cc41 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -45,7 +45,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) * * This is called before we do cpu ident work */ - + int __cpuinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ @@ -58,7 +58,7 @@ int __cpuinit ppro_with_ram_bug(void) } return 0; } - + /* * P4 Xeon errata 037 workaround. @@ -69,7 +69,7 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) unsigned long lo, hi; if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { - rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); + rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); if ((lo & (1<<9)) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); @@ -127,10 +127,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) */ c->f00f_bug = 0; if (!paravirt_enabled() && c->x86 == 5) { - static int f00f_workaround_enabled = 0; + static int f00f_workaround_enabled; c->f00f_bug = 1; - if ( !f00f_workaround_enabled ) { + if (!f00f_workaround_enabled) { trap_init_f00f_bug(); printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; @@ -139,7 +139,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) #endif l2 = init_intel_cacheinfo(c); - if (c->cpuid_level > 9 ) { + if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) @@ -150,9 +150,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) clear_bit(X86_FEATURE_SEP, c->x86_capability); - /* Names for the Pentium II/Celeron processors - detectable only by also checking the cache size. - Dixon is NOT a Celeron. */ + /* + * Names for the Pentium II/Celeron processors + * detectable only by also checking the cache size. 
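+ * (For instance, the model 6 branch below reports "Celeron
+ *  (Mendocino)" only when the detected L2 size is 128K - editorial
+ *  gloss.)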
+ * Dixon is NOT a Celeron. + */ if (c->x86 == 6) { switch (c->x86_model) { case 5: @@ -163,14 +165,14 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) p = "Mobile Pentium II (Dixon)"; } break; - + case 6: if (l2 == 128) p = "Celeron (Mendocino)"; else if (c->x86_mask == 0 || c->x86_mask == 5) p = "Celeron-A"; break; - + case 8: if (l2 == 128) p = "Celeron (Coppermine)"; @@ -178,9 +180,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) } } - if ( p ) + if (p) strcpy(c->x86_model_id, p); - + c->x86_max_cores = num_cpu_cores(c); detect_ht(c); @@ -211,7 +213,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) if (c->x86 == 15) { set_bit(X86_FEATURE_P4, c->x86_capability); } - if (c->x86 == 6) + if (c->x86 == 6) set_bit(X86_FEATURE_P3, c->x86_capability); if (cpu_has_ds) { unsigned int l1; @@ -226,9 +228,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) ds_init_intel(c); } -static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) +static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) { - /* Intel PIII Tualatin. This comes in two flavours. + /* + * Intel PIII Tualatin. This comes in two flavours. * One has 256kb of cache, the other 512. We have no way * to determine which, so we use a boottime override * for the 512kb model, and assume 256 otherwise. @@ -240,42 +243,42 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned static struct cpu_dev intel_cpu_dev __cpuinitdata = { .c_vendor = "Intel", - .c_ident = { "GenuineIntel" }, + .c_ident = { "GenuineIntel" }, .c_models = { - { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = - { - [0] = "486 DX-25/33", - [1] = "486 DX-50", - [2] = "486 SX", - [3] = "486 DX/2", - [4] = "486 SL", - [5] = "486 SX/2", - [7] = "486 DX/2-WB", - [8] = "486 DX/4", + { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = + { + [0] = "486 DX-25/33", + [1] = "486 DX-50", + [2] = "486 SX", + [3] = "486 DX/2", + [4] = "486 SL", + [5] = "486 SX/2", + [7] = "486 DX/2-WB", + [8] = "486 DX/4", [9] = "486 DX/4-WB" } }, { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = - { - [0] = "Pentium 60/66 A-step", - [1] = "Pentium 60/66", + { + [0] = "Pentium 60/66 A-step", + [1] = "Pentium 60/66", [2] = "Pentium 75 - 200", - [3] = "OverDrive PODP5V83", + [3] = "OverDrive PODP5V83", [4] = "Pentium MMX", - [7] = "Mobile Pentium 75 - 200", + [7] = "Mobile Pentium 75 - 200", [8] = "Mobile Pentium MMX" } }, { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = - { + { [0] = "Pentium Pro A-step", - [1] = "Pentium Pro", - [3] = "Pentium II (Klamath)", - [4] = "Pentium II (Deschutes)", - [5] = "Pentium II (Deschutes)", + [1] = "Pentium Pro", + [3] = "Pentium II (Klamath)", + [4] = "Pentium II (Deschutes)", + [5] = "Pentium II (Deschutes)", [6] = "Mobile Pentium II", - [7] = "Pentium III (Katmai)", - [8] = "Pentium III (Coppermine)", + [7] = "Pentium III (Katmai)", + [8] = "Pentium III (Coppermine)", [10] = "Pentium III (Cascades)", [11] = "Pentium III (Tualatin)", } @@ -361,5 +364,5 @@ unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) EXPORT_SYMBOL(cmpxchg_486_u64); #endif -// arch_initcall(intel_cpu_init); +/* arch_initcall(intel_cpu_init); */ -- cgit v1.2.1 From d717ca84c02f4b60a85502c23bca5698801dfaae Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:09:50 +0100 Subject: x86: coding style fixes to arch/x86/oprofile/init.c Before: total: 5 errors, 4 warnings, 48 lines checked After: total: 
0 errors, 4 warnings, 49 lines checked No code changed: arch/x86/oprofile/init.o: text data bss dec hex filename 42 0 0 42 2a init.o.before 42 0 0 42 2a init.o.after md5: 74c94c315cfbf245aeba36eceac57e66 init.o.before.asm 74c94c315cfbf245aeba36eceac57e66 init.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/oprofile/init.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c index 5341d481d92f..cdfe4c54deca 100644 --- a/arch/x86/oprofile/init.c +++ b/arch/x86/oprofile/init.c @@ -10,18 +10,19 @@ #include #include #include - -/* We support CPUs that have performance counters like the Pentium Pro + +/* + * We support CPUs that have performance counters like the Pentium Pro * with the NMI mode driver. */ - -extern int op_nmi_init(struct oprofile_operations * ops); -extern int op_nmi_timer_init(struct oprofile_operations * ops); + +extern int op_nmi_init(struct oprofile_operations *ops); +extern int op_nmi_timer_init(struct oprofile_operations *ops); extern void op_nmi_exit(void); extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); -int __init oprofile_arch_init(struct oprofile_operations * ops) +int __init oprofile_arch_init(struct oprofile_operations *ops) { int ret; -- cgit v1.2.1 From f73920cd63d316008738427a0df2caab6cc88ad7 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:09:56 +0100 Subject: x86: coding style fixes to arch/x86/lib/strstr_3 Before: total: 3 errors, 0 warnings, 31 lines checked After: total: 0 errors, 0 warnings, 31 lines checked No code changed: arch/x86/lib/strstr_32.o: text data bss dec hex filename 49 0 0 49 31 strstr_32.o.before 49 0 0 49 31 strstr_32.o.after md5: a224a7c4082e75a4f31f9d91dd34fe8e strstr_32.o.before.asm a224a7c4082e75a4f31f9d91dd34fe8e strstr_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/lib/strstr_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index a3dafbf59dae..42e8a50303f3 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c @@ -1,9 +1,9 @@ #include -char * strstr(const char * cs,const char * ct) +char *strstr(const char *cs, const char *ct) { int d0, d1; -register char * __res; +register char *__res; __asm__ __volatile__( "movl %6,%%edi\n\t" "repne\n\t" -- cgit v1.2.1 From 0067cc996ee7a0dd282d8da5b64fa60aa2066bb2 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:06 +0100 Subject: x86: coding style fixes to arch/x86/kernel/mca_32.c Before: total: 42 errors, 3 warnings, 469 lines checked After: total: 0 errors, 3 warnings, 479 lines checked No code changed: arch/x86/kernel/mca_32.o: text data bss dec hex filename 1832 288 5 2125 84d mca_32.o.before 1832 288 5 2125 84d mca_32.o.after md5: c0e45e2b743ce26349eb07dc53e80b94 mca_32.o.before.asm c0e45e2b743ce26349eb07dc53e80b94 mca_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/mca_32.c | 96 ++++++++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 43 deletions(-) diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 9482033ed0fe..2dc183758be3 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c @@ -53,9 +53,9 @@ #include #include -static unsigned char which_scsi = 0; +static unsigned char which_scsi; -int MCA_bus = 0; +int MCA_bus; EXPORT_SYMBOL(MCA_bus); /* @@ -68,15 +68,17 @@ 
static DEFINE_SPINLOCK(mca_lock); /* Build the status info for the adapter */ -static void mca_configure_adapter_status(struct mca_device *mca_dev) { +static void mca_configure_adapter_status(struct mca_device *mca_dev) +{ mca_dev->status = MCA_ADAPTER_NONE; mca_dev->pos_id = mca_dev->pos[0] + (mca_dev->pos[1] << 8); - if(!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) { + if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) { - /* id = 0x0000 usually indicates hardware failure, + /* + * id = 0x0000 usually indicates hardware failure, * however, ZP Gu (zpg@castle.net> reports that his 9556 * has 0x0000 as id and everything still works. There * also seem to be an adapter with id = 0x0000; the @@ -87,9 +89,10 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) { mca_dev->status = MCA_ADAPTER_ERROR; return; - } else if(mca_dev->pos_id != 0xffff) { + } else if (mca_dev->pos_id != 0xffff) { - /* 0xffff usually indicates that there's no adapter, + /* + * 0xffff usually indicates that there's no adapter, * however, some integrated adapters may have 0xffff as * their id and still be valid. Examples are on-board * VGA of the 55sx, the integrated SCSI of the 56 & 57, @@ -99,19 +102,19 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) { mca_dev->status = MCA_ADAPTER_NORMAL; } - if((mca_dev->pos_id == 0xffff || + if ((mca_dev->pos_id == 0xffff || mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) { int j; - for(j = 2; j < 8; j++) { - if(mca_dev->pos[j] != 0xff) { + for (j = 2; j < 8; j++) { + if (mca_dev->pos[j] != 0xff) { mca_dev->status = MCA_ADAPTER_NORMAL; break; } } } - if(!(mca_dev->pos[2] & MCA_ENABLED)) { + if (!(mca_dev->pos[2] & MCA_ENABLED)) { /* enabled bit is in POS 2 */ @@ -133,7 +136,7 @@ static struct resource mca_standard_resources[] = { #define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources) -/** +/* * mca_read_and_store_pos - read the POS registers into a memory buffer * @pos: a char pointer to 8 bytes, contains the POS register value on * successful return @@ -141,12 +144,14 @@ static struct resource mca_standard_resources[] = { * Returns 1 if a card actually exists (i.e. the pos isn't * all 0xff) or 0 otherwise */ -static int mca_read_and_store_pos(unsigned char *pos) { +static int mca_read_and_store_pos(unsigned char *pos) +{ int j; int found = 0; - for(j=0; j<8; j++) { - if((pos[j] = inb_p(MCA_POS_REG(j))) != 0xff) { + for (j = 0; j < 8; j++) { + pos[j] = inb_p(MCA_POS_REG(j)); + if (pos[j] != 0xff) { /* 0xff all across means no device. 0x00 means * something's broken, but a device is * probably there. 
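 * (editorial gloss: pos[] holds the eight bytes this loop reads via
 *  inb_p(MCA_POS_REG(j)).)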
However, if you get 0x00 @@ -167,11 +172,11 @@ static unsigned char mca_pc_read_pos(struct mca_device *mca_dev, int reg) unsigned char byte; unsigned long flags; - if(reg < 0 || reg >= 8) + if (reg < 0 || reg >= 8) return 0; spin_lock_irqsave(&mca_lock, flags); - if(mca_dev->pos_register) { + if (mca_dev->pos_register) { /* Disable adapter setup, enable motherboard setup */ outb_p(0, MCA_ADAPTER_SETUP_REG); @@ -203,7 +208,7 @@ static void mca_pc_write_pos(struct mca_device *mca_dev, int reg, { unsigned long flags; - if(reg < 0 || reg >= 8) + if (reg < 0 || reg >= 8) return; spin_lock_irqsave(&mca_lock, flags); @@ -227,17 +232,17 @@ static void mca_pc_write_pos(struct mca_device *mca_dev, int reg, } /* for the primary MCA bus, we have identity transforms */ -static int mca_dummy_transform_irq(struct mca_device * mca_dev, int irq) +static int mca_dummy_transform_irq(struct mca_device *mca_dev, int irq) { return irq; } -static int mca_dummy_transform_ioport(struct mca_device * mca_dev, int port) +static int mca_dummy_transform_ioport(struct mca_device *mca_dev, int port) { return port; } -static void *mca_dummy_transform_memory(struct mca_device * mca_dev, void *mem) +static void *mca_dummy_transform_memory(struct mca_device *mca_dev, void *mem) { return mem; } @@ -251,7 +256,8 @@ static int __init mca_init(void) short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00}; struct mca_bus *bus; - /* WARNING: Be careful when making changes here. Putting an adapter + /* + * WARNING: Be careful when making changes here. Putting an adapter * and the motherboard simultaneously into setup mode may result in * damage to chips (according to The Indispensible PC Hardware Book * by Hans-Peter Messmer). Also, we disable system interrupts (so @@ -283,7 +289,7 @@ static int __init mca_init(void) /* get the motherboard device */ mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL); - if(unlikely(!mca_dev)) + if (unlikely(!mca_dev)) goto out_nomem; /* @@ -309,7 +315,7 @@ static int __init mca_init(void) mca_register_device(MCA_PRIMARY_BUS, mca_dev); mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); - if(unlikely(!mca_dev)) + if (unlikely(!mca_dev)) goto out_unlock_nomem; /* Put motherboard into video setup mode, read integrated video @@ -326,7 +332,8 @@ static int __init mca_init(void) mca_dev->slot = MCA_INTEGVIDEO; mca_register_device(MCA_PRIMARY_BUS, mca_dev); - /* Put motherboard into scsi setup mode, read integrated scsi + /* + * Put motherboard into scsi setup mode, read integrated scsi * POS registers, and turn motherboard setup off. * * It seems there are two possible SCSI registers. Martin says that @@ -338,18 +345,18 @@ static int __init mca_init(void) * machine. */ - for(i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) { + for (i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) { outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG); - if(mca_read_and_store_pos(pos)) + if (mca_read_and_store_pos(pos)) break; } - if(which_scsi) { + if (which_scsi) { /* found a scsi card */ mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); - if(unlikely(!mca_dev)) + if (unlikely(!mca_dev)) goto out_unlock_nomem; - for(j = 0; j < 8; j++) + for (j = 0; j < 8; j++) mca_dev->pos[j] = pos[j]; mca_configure_adapter_status(mca_dev); @@ -364,21 +371,22 @@ static int __init mca_init(void) outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); - /* Now loop over MCA slots: put each adapter into setup mode, and + /* + * Now loop over MCA slots: put each adapter into setup mode, and * read its POS registers. 
Then put adapter setup off. */ - for(i=0; ipos[j]=pos[j]; + for (j = 0; j < 8; j++) + mca_dev->pos[j] = pos[j]; mca_dev->driver_loaded = 0; mca_dev->slot = i; @@ -414,20 +422,20 @@ mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) { int slot = mca_dev->slot; - if(slot == MCA_INTEGSCSI) { + if (slot == MCA_INTEGSCSI) { printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n", mca_dev->name); - } else if(slot == MCA_INTEGVIDEO) { + } else if (slot == MCA_INTEGVIDEO) { printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n", mca_dev->name); - } else if(slot == MCA_MOTHERBOARD) { + } else if (slot == MCA_MOTHERBOARD) { printk(KERN_CRIT "NMI: caused by motherboard (%s)\n", mca_dev->name); } /* More info available in POS 6 and 7? */ - if(check_flag) { + if (check_flag) { unsigned char pos6, pos7; pos6 = mca_device_read_pos(mca_dev, 6); @@ -447,8 +455,9 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) pos5 = mca_device_read_pos(mca_dev, 5); - if(!(pos5 & 0x80)) { - /* Bit 7 of POS 5 is reset when this adapter has a hardware + if (!(pos5 & 0x80)) { + /* + * Bit 7 of POS 5 is reset when this adapter has a hardware * error. Bit 7 it reset if there's error information * available in POS 6 and 7. */ @@ -460,7 +469,8 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) void __kprobes mca_handle_nmi(void) { - /* First try - scan the various adapters and see if a specific + /* + * First try - scan the various adapters and see if a specific * adapter was responsible for the error. */ bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); -- cgit v1.2.1 From e0f025704437dfd6cb5adc077f05709c31189edc Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:16 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/mtrr/state.c Before: total: 6 errors, 5 warnings, 80 lines checked After: total: 0 errors, 4 warnings, 82 lines checked No code changed: arch/x86/kernel/cpu/mtrr/state.o: text data bss dec hex filename 313 0 4 317 13d state.o.before 313 0 4 317 13d state.o.after md5: a0fbd61096205f9180f0bf45ed386d61 state.o.before.asm a0fbd61096205f9180f0bf45ed386d61 state.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mtrr/state.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 9f8ba923d1c9..7f7e2753685b 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c @@ -19,13 +19,15 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) if (use_intel() || is_cpu(CYRIX)) { /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if ( cpu_has_pge ) { + if (cpu_has_pge) { ctxt->cr4val = read_cr4(); write_cr4(ctxt->cr4val & ~X86_CR4_PGE); } - /* Disable and flush caches. Note that wbinvd flushes the TLBs as - a side-effect */ + /* + * Disable and flush caches. 
Note that wbinvd flushes the TLBs + * as a side-effect + */ cr0 = read_cr0() | X86_CR0_CD; wbinvd(); write_cr0(cr0); @@ -42,7 +44,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) { - if (use_intel()) + if (use_intel()) /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); @@ -66,12 +68,12 @@ void set_mtrr_done(struct set_mtrr_context *ctxt) else /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, ctxt->ccr3); - + /* Enable caches */ write_cr0(read_cr0() & 0xbfffffff); /* Restore value of CR4 */ - if ( cpu_has_pge ) + if (cpu_has_pge) write_cr4(ctxt->cr4val); } /* Re-enable interrupts locally (if enabled previously) */ -- cgit v1.2.1 From 93d8bd3d4f070014e1e73c0ac618ac33924a7b96 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:23 +0100 Subject: x86: coding style fixes to arch/x86/lib/memcpy_32.c Before: total: 2 errors, 0 warnings, 43 lines checked After: total: 0 errors, 0 warnings, 43 lines checked No code changed: arch/x86/lib/memcpy_32.o: text data bss dec hex filename 164 0 0 164 a4 memcpy_32.o.before 164 0 0 164 a4 memcpy_32.o.after md5: d759f55621af27f51720b59c8ca96a4d memcpy_32.o.before.asm d759f55621af27f51720b59c8ca96a4d memcpy_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/lib/memcpy_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index 37756b6fb329..5415a9d06f53 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -25,7 +25,7 @@ void *memmove(void *dest, const void *src, size_t n) int d0, d1, d2; if (dest < src) { - memcpy(dest,src,n); + memcpy(dest, src, n); } else { __asm__ __volatile__( "std\n\t" -- cgit v1.2.1 From f97518271941fdb2dab07d7bd58bf9fa39ba3f65 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:28 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/transmeta.c Before: total: 13 errors, 3 warnings, 105 lines checked After: total: 0 errors, 3 warnings, 107 lines checked No code changed: arch/x86/kernel/cpu/transmeta.o: text data bss dec hex filename 713 324 0 1037 40d transmeta.o.before 713 324 0 1037 40d transmeta.o.after md5: 19abe2cafac617e1e2aadc4aa4e9923b transmeta.o.before.asm 19abe2cafac617e1e2aadc4aa4e9923b transmeta.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/transmeta.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index c2d168e992f4..daee21d208f4 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -18,8 +18,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) /* Print CMS and CPU revision */ max = cpuid_eax(0x80860000); cpu_rev = 0; - if ( max >= 0x80860001 ) { - cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); + if (max >= 0x80860001) { + cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); if (cpu_rev != 0x02000000) { printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", (cpu_rev >> 24) & 0xff, @@ -29,7 +29,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) cpu_freq); } } - if ( max >= 0x80860002 ) { + if (max >= 0x80860002) { cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); if (cpu_rev == 0x02000000) { printk(KERN_INFO 
"CPU: Processor revision %08X, %u MHz\n", @@ -42,7 +42,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) cms_rev1 & 0xff, cms_rev2); } - if ( max >= 0x80860006 ) { + if (max >= 0x80860006) { cpuid(0x80860003, (void *)&cpu_info[0], (void *)&cpu_info[4], @@ -75,22 +75,24 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) /* All Transmeta CPUs have a constant TSC */ set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - + #ifdef CONFIG_SYSCTL - /* randomize_va_space slows us down enormously; - it probably triggers retranslation of x86->native bytecode */ + /* + * randomize_va_space slows us down enormously; + * it probably triggers retranslation of x86->native bytecode + */ randomize_va_space = 0; #endif } -static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c) +static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c) { u32 xlvl; /* Transmeta-defined flags: level 0x80860001 */ xlvl = cpuid_eax(0x80860000); - if ( (xlvl & 0xffff0000) == 0x80860000 ) { - if ( xlvl >= 0x80860001 ) + if ((xlvl & 0xffff0000) == 0x80860000) { + if (xlvl >= 0x80860001) c->x86_capability[2] = cpuid_edx(0x80860001); } } -- cgit v1.2.1 From fb87a298fb79357fa5b27e6916ae1c45bf94dac7 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:33 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/amd.c Before: total: 42 errors, 26 warnings, 350 lines checked After: total: 0 errors, 26 warnings, 352 lines checked No code changed: arch/x86/kernel/cpu/amd.o: text data bss dec hex filename 1936 328 0 2264 8d8 amd.o.before 1936 328 0 2264 8d8 amd.o.after md5: 873430a88faaf31bb4bbfe3a2a691e45 amd.o.before.asm 873430a88faaf31bb4bbfe3a2a691e45 amd.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 94 ++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 46 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index cab4e562b5cb..1a3e1bb4d758 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -20,7 +20,7 @@ * the chip setting when fixing the bug but they also tweaked some * performance at the same time.. */ - + extern void vide(void); __asm__(".align 4\nvide: ret"); @@ -81,7 +81,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP unsigned long long value; - /* Disable TLB flush filter by setting HWCR.FFDIS on K8 + /* + * Disable TLB flush filter by setting HWCR.FFDIS on K8 * bit 6 of msr C001_0015 * * Errata 63 for SH-B3 steppings @@ -102,15 +103,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) * no bus pipeline) */ - /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; - 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ + /* + * Bit 31 in normal CPUID used for nonstandard 3DNow ID; + * DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway + */ clear_bit(0*32+31, c->x86_capability); - + r = get_model_name(c); - switch(c->x86) - { - case 4: + switch (c->x86) { + case 4: /* * General Systems BIOSen alias the cpu frequency registers * of the Elan at 0x000df000. 
Unfortuantly, one of the Linux @@ -120,61 +122,60 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ #define CBAR_ENB (0x80000000) #define CBAR_KEY (0X000000CB) - if (c->x86_model==9 || c->x86_model == 10) { + if (c->x86_model == 9 || c->x86_model == 10) { if (inl (CBAR) & CBAR_ENB) outl (0 | CBAR_KEY, CBAR); } break; - case 5: - if( c->x86_model < 6 ) - { + case 5: + if (c->x86_model < 6) { /* Based on AMD doc 20734R - June 2000 */ - if ( c->x86_model == 0 ) { + if (c->x86_model == 0) { clear_bit(X86_FEATURE_APIC, c->x86_capability); set_bit(X86_FEATURE_PGE, c->x86_capability); } break; } - - if ( c->x86_model == 6 && c->x86_mask == 1 ) { + + if (c->x86_model == 6 && c->x86_mask == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); unsigned long d, d2; - + printk(KERN_INFO "AMD K6 stepping B detected - "); - + /* - * It looks like AMD fixed the 2.6.2 bug and improved indirect + * It looks like AMD fixed the 2.6.2 bug and improved indirect * calls at the same time. */ n = K6_BUG_LOOP; f_vide = vide; rdtscl(d); - while (n--) + while (n--) f_vide(); rdtscl(d2); d = d2-d; - if (d > 20*K6_BUG_LOOP) + if (d > 20*K6_BUG_LOOP) printk("system stability may be impaired when more than 32 MB are used.\n"); - else + else printk("probably OK (after B9730xxxx).\n"); printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); } /* K6 with old style WHCR */ if (c->x86_model < 8 || - (c->x86_model== 8 && c->x86_mask < 8)) { + (c->x86_model == 8 && c->x86_mask < 8)) { /* We can only write allocate on the low 508Mb */ - if(mbytes>508) - mbytes=508; + if (mbytes > 508) + mbytes = 508; rdmsr(MSR_K6_WHCR, l, h); - if ((l&0x0000FFFF)==0) { + if ((l&0x0000FFFF) == 0) { unsigned long flags; - l=(1<<0)|((mbytes/4)<<1); + l = (1<<0)|((mbytes/4)<<1); local_irq_save(flags); wbinvd(); wrmsr(MSR_K6_WHCR, l, h); @@ -185,17 +186,17 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) break; } - if ((c->x86_model == 8 && c->x86_mask >7) || + if ((c->x86_model == 8 && c->x86_mask > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ - if(mbytes>4092) - mbytes=4092; + if (mbytes > 4092) + mbytes = 4092; rdmsr(MSR_K6_WHCR, l, h); - if ((l&0xFFFF0000)==0) { + if ((l&0xFFFF0000) == 0) { unsigned long flags; - l=((mbytes>>2)<<22)|(1<<16); + l = ((mbytes>>2)<<22)|(1<<16); local_irq_save(flags); wbinvd(); wrmsr(MSR_K6_WHCR, l, h); @@ -217,10 +218,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) break; } break; - case 6: /* An Athlon/Duron */ - - /* Bit 15 of Athlon specific MSR 15, needs to be 0 - * to enable SSE on Palomino/Morgan/Barton CPU's. + case 6: /* An Athlon/Duron */ + + /* + * Bit 15 of Athlon specific MSR 15, needs to be 0 + * to enable SSE on Palomino/Morgan/Barton CPU's. * If the BIOS didn't enable it already, enable it here. */ if (c->x86_model >= 6 && c->x86_model <= 10) { @@ -233,11 +235,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } - /* It's been determined by AMD that Athlons since model 8 stepping 1 + /* + * It's been determined by AMD that Athlons since model 8 stepping 1 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ - if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { + if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { printk ("CPU: CLK_CTL MSR was %x. 
Reprogramming to %x\n", l, @@ -256,7 +259,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) set_bit(X86_FEATURE_K8, c->x86_capability); break; case 6: - set_bit(X86_FEATURE_K7, c->x86_capability); + set_bit(X86_FEATURE_K7, c->x86_capability); break; } if (c->x86 >= 6) @@ -264,9 +267,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) display_cacheinfo(c); - if (cpuid_eax(0x80000000) >= 0x80000008) { + if (cpuid_eax(0x80000000) >= 0x80000008) c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; - } #ifdef CONFIG_X86_HT /* @@ -308,14 +310,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability); } -static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) +static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* AMD errata T13 (order #21922) */ if ((c->x86 == 6)) { if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ size = 64; if (c->x86_model == 4 && - (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */ + (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ size = 256; } return size; @@ -323,16 +325,16 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned in static struct cpu_dev amd_cpu_dev __cpuinitdata = { .c_vendor = "AMD", - .c_ident = { "AuthenticAMD" }, + .c_ident = { "AuthenticAMD" }, .c_models = { { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = { [3] = "486 DX/2", [7] = "486 DX/2-WB", - [8] = "486 DX/4", - [9] = "486 DX/4-WB", + [8] = "486 DX/4", + [9] = "486 DX/4-WB", [14] = "Am5x86-WT", - [15] = "Am5x86-WB" + [15] = "Am5x86-WB" } }, }, -- cgit v1.2.1 From 83e714e82f3434a32c3f54f7223f78345d873218 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:40 +0100 Subject: x86: coding style fixes to arch/x86/kernel/vm86_32.c Before: total: 64 errors, 18 warnings, 840 lines checked After: total: 12 errors, 15 warnings, 844 lines checked No code changed: arch/x86/kernel/vm86_32.o: text data bss dec hex filename 4449 28 132 4609 1201 vm86_32.o.before 4449 28 132 4609 1201 vm86_32.o.after md5: e4e51ed7689d17f04148554a3c6d5bb6 vm86_32.o.before.asm e4e51ed7689d17f04148554a3c6d5bb6 vm86_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/vm86_32.c | 174 ++++++++++++++++++++++++---------------------- 1 file changed, 89 insertions(+), 85 deletions(-) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 738c2104df30..6a91fcf92d67 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -64,7 +64,7 @@ #define KVM86 ((struct kernel_vm86_struct *)regs) -#define VMPI KVM86->vm86plus +#define VMPI KVM86->vm86plus /* @@ -81,7 +81,7 @@ #define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) #define VEFLAGS (current->thread.v86flags) -#define set_flags(X,new,mask) \ +#define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) #define SAFE_MASK (0xDD5) @@ -93,8 +93,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user, { int ret = 0; - /* kernel_vm86_regs is missing gs, so copy everything up to - (but not including) orig_eax, and then rest including orig_eax. */ + /* + * kernel_vm86_regs is missing gs, so copy everything up to + * (but not including) orig_eax, and then rest including orig_eax. 
+ */ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); ret += copy_to_user(&user->orig_eax, ®s->pt.orig_ax, sizeof(struct kernel_vm86_regs) - @@ -120,7 +122,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, return ret; } -struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) +struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) { struct tss_struct *tss; struct pt_regs *ret; @@ -138,8 +140,8 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) do_exit(SIGSEGV); } set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask); - tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs,regs); - tmp += put_user(current->thread.screen_bitmap,¤t->thread.vm86_info->screen_bitmap); + tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs, regs); + tmp += put_user(current->thread.screen_bitmap, ¤t->thread.vm86_info->screen_bitmap); if (tmp) { printk("vm86: could not access userspace vm86_info\n"); do_exit(SIGSEGV); @@ -237,20 +239,21 @@ asmlinkage int sys_vm86(struct pt_regs regs) tsk = current; switch (regs.bx) { - case VM86_REQUEST_IRQ: - case VM86_FREE_IRQ: - case VM86_GET_IRQ_BITS: - case VM86_GET_AND_RESET_IRQ: - ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); - goto out; - case VM86_PLUS_INSTALL_CHECK: - /* NOTE: on old vm86 stuff this will return the error - from access_ok(), because the subfunction is - interpreted as (invalid) address to vm86_struct. - So the installation check works. - */ - ret = 0; - goto out; + case VM86_REQUEST_IRQ: + case VM86_FREE_IRQ: + case VM86_GET_IRQ_BITS: + case VM86_GET_AND_RESET_IRQ: + ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); + goto out; + case VM86_PLUS_INSTALL_CHECK: + /* + * NOTE: on old vm86 stuff this will return the error + * from access_ok(), because the subfunction is + * interpreted as (invalid) address to vm86_struct. + * So the installation check works. 
+ */ + ret = 0; + goto out; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ @@ -299,18 +302,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk info->regs.pt.flags |= VM_MASK; switch (info->cpu_type) { - case CPU_286: - tsk->thread.v86mask = 0; - break; - case CPU_386: - tsk->thread.v86mask = NT_MASK | IOPL_MASK; - break; - case CPU_486: - tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; - break; - default: - tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; - break; + case CPU_286: + tsk->thread.v86mask = 0; + break; + case CPU_386: + tsk->thread.v86mask = NT_MASK | IOPL_MASK; + break; + case CPU_486: + tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; + break; + default: + tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; + break; } /* @@ -346,9 +349,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk /* we never return here */ } -static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) +static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) { - struct pt_regs * regs32; + struct pt_regs *regs32; regs32 = save_v86_state(regs16); regs32->ax = retval; @@ -358,29 +361,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) : : "r" (regs32), "r" (current_thread_info())); } -static inline void set_IF(struct kernel_vm86_regs * regs) +static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= VIF_MASK; if (VEFLAGS & VIP_MASK) return_to_32bit(regs, VM86_STI); } -static inline void clear_IF(struct kernel_vm86_regs * regs) +static inline void clear_IF(struct kernel_vm86_regs *regs) { VEFLAGS &= ~VIF_MASK; } -static inline void clear_TF(struct kernel_vm86_regs * regs) +static inline void clear_TF(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~TF_MASK; } -static inline void clear_AC(struct kernel_vm86_regs * regs) +static inline void clear_AC(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~AC_MASK; } -/* It is correct to call set_IF(regs) from the set_vflags_* +/* + * It is correct to call set_IF(regs) from the set_vflags_* * functions. However someone forgot to call clear_IF(regs) * in the opposite case. 
* After the command sequence CLI PUSHF STI POPF you should @@ -391,7 +395,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs) * [KD] */ -static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs) +static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { set_flags(VEFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); @@ -401,7 +405,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs clear_IF(regs); } -static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs) +static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { set_flags(VFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); @@ -411,7 +415,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg clear_IF(regs); } -static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) +static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) { unsigned long flags = regs->pt.flags & RETURN_MASK; @@ -421,11 +425,11 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) return flags | (VEFLAGS & current->thread.v86mask); } -static inline int is_revectored(int nr, struct revectored_struct * bitmap) +static inline int is_revectored(int nr, struct revectored_struct *bitmap) { __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" :"=r" (nr) - :"m" (*bitmap),"r" (nr)); + :"m" (*bitmap), "r" (nr)); return nr; } @@ -437,7 +441,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ptr--; \ if (put_user(__val, base + ptr) < 0) \ goto err_label; \ - } while(0) + } while (0) #define pushw(base, ptr, val, err_label) \ do { \ @@ -448,7 +452,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ - } while(0) + } while (0) #define pushl(base, ptr, val, err_label) \ do { \ @@ -465,7 +469,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ - } while(0) + } while (0) #define popb(base, ptr, err_label) \ ({ \ @@ -512,7 +516,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) * in userspace is always better than an Oops anyway.) 
[KD] */ static void do_int(struct kernel_vm86_regs *regs, int i, - unsigned char __user * ssp, unsigned short sp) + unsigned char __user *ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; @@ -521,7 +525,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, goto cannot_handle; if (is_revectored(i, &KVM86->int_revectored)) goto cannot_handle; - if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored)) + if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) @@ -543,15 +547,15 @@ cannot_handle: return_to_32bit(regs, VM86_INTx + (i << 8)); } -int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno) +int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { if (VMPI.is_vm86pus) { - if ( (trapno==3) || (trapno==1) ) + if ((trapno == 3) || (trapno == 1)) return_to_32bit(regs, VM86_TRAP + (trapno << 8)); do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); return 0; } - if (trapno !=1) + if (trapno != 1) return 1; /* we let this handle by the calling routine */ if (current->ptrace & PT_PTRACED) { unsigned long flags; @@ -566,7 +570,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno return 0; } -void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) +void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) { unsigned char opcode; unsigned char __user *csp; @@ -595,17 +599,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { - case 0x66: /* 32-bit data */ data32=1; break; - case 0x67: /* 32-bit address */ break; - case 0x2e: /* CS */ break; - case 0x3e: /* DS */ break; - case 0x26: /* ES */ break; - case 0x36: /* SS */ break; - case 0x65: /* GS */ break; - case 0x64: /* FS */ break; - case 0xf2: /* repnz */ break; - case 0xf3: /* rep */ break; - default: pref_done = 1; + case 0x66: /* 32-bit data */ data32 = 1; break; + case 0x67: /* 32-bit address */ break; + case 0x2e: /* CS */ break; + case 0x3e: /* DS */ break; + case 0x26: /* ES */ break; + case 0x36: /* SS */ break; + case 0x65: /* GS */ break; + case 0x64: /* FS */ break; + case 0xf2: /* repnz */ break; + case 0xf3: /* rep */ break; + default: pref_done = 1; } } while (!pref_done); @@ -628,7 +632,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) { unsigned long newflags; if (data32) { - newflags=popl(ssp, sp, simulate_sigsegv); + newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); @@ -636,20 +640,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) } IP(regs) = ip; CHECK_IF_IN_TRAP; - if (data32) { + if (data32) set_vflags_long(newflags, regs); - } else { + else set_vflags_short(newflags, regs); - } + VM86_FAULT_RETURN; } /* int xx */ case 0xcd: { - int intno=popb(csp, ip, simulate_sigsegv); + int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if (VMPI.vm86dbg_active) { - if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] ) + if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3]) return_to_32bit(regs, VM86_INTx + (intno << 8)); } do_int(regs, intno, ssp, sp); @@ -663,9 +667,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) unsigned long newcs; unsigned long newflags; if (data32) { - 
newip=popl(ssp, sp, simulate_sigsegv); - newcs=popl(ssp, sp, simulate_sigsegv); - newflags=popl(ssp, sp, simulate_sigsegv); + newip = popl(ssp, sp, simulate_sigsegv); + newcs = popl(ssp, sp, simulate_sigsegv); + newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); @@ -734,18 +738,18 @@ static struct vm86_irqs { static DEFINE_SPINLOCK(irqbits_lock); static int irqbits; -#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \ +#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ - | (1 << SIGUNUSED) ) - + | (1 << SIGUNUSED)) + static irqreturn_t irq_handler(int intno, void *dev_id) { int irq_bit; unsigned long flags; - spin_lock_irqsave(&irqbits_lock, flags); + spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; - if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk) + if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) @@ -759,7 +763,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id) return IRQ_HANDLED; out: - spin_unlock_irqrestore(&irqbits_lock, flags); + spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE; } @@ -770,9 +774,9 @@ static inline void free_vm86_irq(int irqnumber) free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; - spin_lock_irqsave(&irqbits_lock, flags); + spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); - spin_unlock_irqrestore(&irqbits_lock, flags); + spin_unlock_irqrestore(&irqbits_lock, flags); } void release_vm86_irqs(struct task_struct *task) @@ -788,10 +792,10 @@ static inline int get_and_reset_irq(int irqnumber) int bit; unsigned long flags; int ret = 0; - + if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; - spin_lock_irqsave(&irqbits_lock, flags); + spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { @@ -799,7 +803,7 @@ static inline int get_and_reset_irq(int irqnumber) ret = 1; } - spin_unlock_irqrestore(&irqbits_lock, flags); + spin_unlock_irqrestore(&irqbits_lock, flags); return ret; } -- cgit v1.2.1 From 714a9ac2efe5e20fdccfcf91b27dbd384bd34685 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:10:49 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/mcheck/non-fatal.c Before: total: 5 errors, 5 warnings, 91 lines checked After: total: 0 errors, 0 warnings, 94 lines checked No code changed: arch/x86/kernel/cpu/mcheck/non-fatal.o: text data bss dec hex filename 441 80 4 525 20d non-fatal.o.before 441 80 4 525 20d non-fatal.o.after md5: 137bc114d2020ad331d5e76444a2c7d3 non-fatal.o.before.asm 137bc114d2020ad331d5e76444a2c7d3 non-fatal.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/non-fatal.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index bf39409b3838..00ccb6c14ec2 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include @@ -26,23 +26,26 @@ static int firstbank; #define MCE_RATE 15*HZ /* timer rate is 15s */ -static void mce_checkregs (void *info) +static void mce_checkregs(void *info) { u32 low, high; int i; - for (i=firstbank; i Date: Fri, 22 Feb 2008 23:10:57 +0100 Subject: x86: coding style fixes to 
arch/x86/kernel/cpu/mcheck/winchip.c Before: total: 4 errors, 0 warnings, 36 lines checked After: total: 0 errors, 0 warnings, 36 lines checked No code changed: arch/x86/kernel/cpu/mcheck/winchip.o: text data bss dec hex filename 222 0 4 226 e2 winchip.o.before 222 0 4 226 e2 winchip.o.after md5: 9caefa12256c5f7d71ef324f6d01a2d5 winchip.o.before.asm 9caefa12256c5f7d71ef324f6d01a2d5 winchip.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/winchip.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 3d428d5afc52..f2be3e190c6b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -8,14 +8,14 @@ #include #include -#include +#include #include #include #include "mce.h" /* Machine check handler for WinChip C6 */ -static void winchip_machine_check(struct pt_regs * regs, long error_code) +static void winchip_machine_check(struct pt_regs *regs, long error_code) { printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); add_taint(TAINT_MACHINE_CHECK); @@ -28,8 +28,8 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) machine_check_vector = winchip_machine_check; wmb(); rdmsr(MSR_IDT_FCR1, lo, hi); - lo|= (1<<2); /* Enable EIERRINT (int 18 MCE) */ - lo&= ~(1<<4); /* Enable MCE */ + lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ + lo &= ~(1<<4); /* Enable MCE */ wrmsr(MSR_IDT_FCR1, lo, hi); set_in_cr4(X86_CR4_MCE); printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); -- cgit v1.2.1 From d677759e99b51f50a75b2adfabb25e9d656ee33c Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:07 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/mcheck/mce_32.c Before: total: 10 errors, 3 warnings, 90 lines checked After: total: 0 errors, 3 warnings, 90 lines checked No code changed: arch/x86/kernel/cpu/mcheck/mce_32.o: text data bss dec hex filename 287 42 12 341 155 mce_32.o.before 287 42 12 341 155 mce_32.o.after md5: fede5ff8e6bc3f62e8e691ca6c45eb39 mce_32.o.before.asm fede5ff8e6bc3f62e8e691ca6c45eb39 mce_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_32.c | 50 ++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index a5182dcd94ae..774d87cfd8cd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c @@ -10,20 +10,20 @@ #include #include -#include +#include #include #include #include "mce.h" -int mce_disabled = 0; +int mce_disabled; int nr_mce_banks; EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ /* Handle unconfigured int18 (should never happen) */ -static void unexpected_machine_check(struct pt_regs * regs, long error_code) -{ +static void unexpected_machine_check(struct pt_regs *regs, long error_code) +{ printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); } @@ -33,30 +33,30 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_mac /* This has to be run for each processor */ void mcheck_init(struct cpuinfo_x86 *c) { - if (mce_disabled==1) + if (mce_disabled == 1) return; switch (c->x86_vendor) { - case X86_VENDOR_AMD: - amd_mcheck_init(c); - break; - - case X86_VENDOR_INTEL: - if (c->x86==5) - intel_p5_mcheck_init(c); - if (c->x86==6) - intel_p6_mcheck_init(c); - if (c->x86==15) - 
intel_p4_mcheck_init(c); - break; - - case X86_VENDOR_CENTAUR: - if (c->x86==5) - winchip_mcheck_init(c); - break; - - default: - break; + case X86_VENDOR_AMD: + amd_mcheck_init(c); + break; + + case X86_VENDOR_INTEL: + if (c->x86 == 5) + intel_p5_mcheck_init(c); + if (c->x86 == 6) + intel_p6_mcheck_init(c); + if (c->x86 == 15) + intel_p4_mcheck_init(c); + break; + + case X86_VENDOR_CENTAUR: + if (c->x86 == 5) + winchip_mcheck_init(c); + break; + + default: + break; } } -- cgit v1.2.1 From 7030760ae5d29d637d1e962c70d1d9c58be3306f Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:12 +0100 Subject: x86: coding style fixes to arch/x86/boot/cpucheck.c Before: total: 30 errors, 0 warnings, 262 lines checked After: total: 0 errors, 0 warnings, 262 lines checked No code changed: arch/x86/boot/cpucheck.o: text data bss dec hex filename 989 0 96 1085 43d cpucheck.o.before 989 0 96 1085 43d cpucheck.o.after md5: 06634cfefb8438fa284ff903b4cafbce cpucheck.o.before.asm 06634cfefb8438fa284ff903b4cafbce cpucheck.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/boot/cpucheck.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 769065bd23d7..2462c88689ed 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -56,27 +56,27 @@ static const u32 req_flags[NCAPINTS] = REQUIRED_MASK7, }; -#define A32(a,b,c,d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) +#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) static int is_amd(void) { - return cpu_vendor[0] == A32('A','u','t','h') && - cpu_vendor[1] == A32('e','n','t','i') && - cpu_vendor[2] == A32('c','A','M','D'); + return cpu_vendor[0] == A32('A', 'u', 't', 'h') && + cpu_vendor[1] == A32('e', 'n', 't', 'i') && + cpu_vendor[2] == A32('c', 'A', 'M', 'D'); } static int is_centaur(void) { - return cpu_vendor[0] == A32('C','e','n','t') && - cpu_vendor[1] == A32('a','u','r','H') && - cpu_vendor[2] == A32('a','u','l','s'); + return cpu_vendor[0] == A32('C', 'e', 'n', 't') && + cpu_vendor[1] == A32('a', 'u', 'r', 'H') && + cpu_vendor[2] == A32('a', 'u', 'l', 's'); } static int is_transmeta(void) { - return cpu_vendor[0] == A32('G','e','n','u') && - cpu_vendor[1] == A32('i','n','e','T') && - cpu_vendor[2] == A32('M','x','8','6'); + return cpu_vendor[0] == A32('G', 'e', 'n', 'u') && + cpu_vendor[1] == A32('i', 'n', 'e', 'T') && + cpu_vendor[2] == A32('M', 'x', '8', '6'); } static int has_fpu(void) -- cgit v1.2.1 From adf85265b455f096fa9caf4aea51f274cdaca3c6 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:23 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/cyrix.c Before: total: 46 errors, 10 warnings, 450 lines checked After: total: 1 errors, 10 warnings, 449 lines checked No code changed: arch/x86/kernel/cpu/cyrix.o: text data bss dec hex filename 2048 908 4 2960 b90 cyrix.o.before 2048 908 4 2960 b90 cyrix.o.after md5: 9add5e69dbd788f91ff24eea8462dad7 cyrix.o.before.asm 9add5e69dbd788f91ff24eea8462dad7 cyrix.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cyrix.c | 113 ++++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 57 deletions(-) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 9c4ee98f2cb8..f7085bde4c28 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -19,7 +19,7 @@ static void __cpuinit do_cyrix_devid(unsigned 
char *dir0, unsigned char *dir1) { unsigned char ccr2, ccr3; unsigned long flags; - + /* we test for DEVID by checking whether CCR3 is writable */ local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); @@ -37,8 +37,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) setCx86(CX86_CCR2, ccr2); *dir0 = 0xfe; } - } - else { + } else { setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ /* read DIR0 and DIR1 CPU registers */ @@ -86,7 +85,7 @@ static char cyrix_model_mult2[] __cpuinitdata = "12233445"; static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) { unsigned long flags; - + if (Cx86_dir0_msb == 3) { unsigned char ccr3, ccr5; @@ -132,7 +131,7 @@ static void __cpuinit set_cx86_memwb(void) /* set 'Not Write-through' */ write_cr0(read_cr0() | X86_CR0_NW); /* CCR2 bit 2: lock NW bit and set WT1 */ - setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 ); + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); } static void __cpuinit set_cx86_inc(void) @@ -148,7 +147,7 @@ static void __cpuinit set_cx86_inc(void) setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); /* PCR0 -- Performance Control */ /* Incrementor Margin 10 */ - setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); + setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ } @@ -167,16 +166,16 @@ static void __cpuinit geode_configure(void) ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ - + /* FPU fast, DTE cache, Mem bypass */ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ - + set_cx86_memwb(); - set_cx86_reorder(); + set_cx86_reorder(); set_cx86_inc(); - + local_irq_restore(flags); } @@ -187,12 +186,14 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) char *buf = c->x86_model_id; const char *p = NULL; - /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; - 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ + /* + * Bit 31 in normal CPUID used for nonstandard 3DNow ID; + * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway + */ clear_bit(0*32+31, c->x86_capability); /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ - if ( test_bit(1*32+24, c->x86_capability) ) { + if (test_bit(1*32+24, c->x86_capability)) { clear_bit(1*32+24, c->x86_capability); set_bit(X86_FEATURE_CXMMX, c->x86_capability); } @@ -213,7 +214,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) * the model, multiplier and stepping. Black magic included, * to make the silicon step/rev numbers match the printed ones. */ - + switch (dir0_msn) { unsigned char tmp; @@ -250,17 +251,18 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) #ifdef CONFIG_PCI { u32 vendor, device; - /* It isn't really a PCI quirk directly, but the cure is the - same. The MediaGX has deep magic SMM stuff that handles the - SB emulation. It throws away the fifo on disable_dma() which - is wrong and ruins the audio. - - Bug2: VSA1 has a wrap bug so that using maximum sized DMA - causes bad things. According to NatSemi VSA2 has another - bug to do with 'hlt'. I've not seen any boards using VSA2 - and X doesn't seem to support it either so who cares 8). - VSA1 we work around however. - */ + /* + * It isn't really a PCI quirk directly, but the cure is the + * same. The MediaGX has deep magic SMM stuff that handles the + * SB emulation. It throws away the fifo on disable_dma() which + * is wrong and ruins the audio. + * + * Bug2: VSA1 has a wrap bug so that using maximum sized DMA + * causes bad things. 
According to NatSemi VSA2 has another + * bug to do with 'hlt'. I've not seen any boards using VSA2 + * and X doesn't seem to support it either so who cares 8). + * VSA1 we work around however. + */ printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); isa_dma_bridge_buggy = 2; @@ -273,52 +275,48 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) /* * The 5510/5520 companion chips have a funky PIT. - */ + */ if (vendor == PCI_VENDOR_ID_CYRIX && (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) mark_tsc_unstable("cyrix 5510/5520 detected"); } #endif - c->x86_cache_size=16; /* Yep 16K integrated cache thats it */ + c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */ /* GXm supports extended cpuid levels 'ala' AMD */ if (c->cpuid_level == 2) { /* Enable cxMMX extensions (GX1 Datasheet 54) */ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); - + /* * GXm : 0x30 ... 0x5f GXm datasheet 51 * GXlv: 0x6x GXlv datasheet 54 * ? : 0x7x * GX1 : 0x8x GX1 datasheet 56 */ - if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f)) + if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) geode_configure(); get_model_name(c); /* get CPU marketing name */ return; - } - else { /* MediaGX */ + } else { /* MediaGX */ Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; p = Cx86_cb+2; c->x86_model = (dir1 & 0x20) ? 1 : 2; } break; - case 5: /* 6x86MX/M II */ - if (dir1 > 7) - { + case 5: /* 6x86MX/M II */ + if (dir1 > 7) { dir0_msn++; /* M II */ /* Enable MMX extensions (App note 108) */ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); - } - else - { + } else { c->coma_bug = 1; /* 6x86MX, it has the bug. */ } tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; p = Cx86_cb+tmp; - if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) + if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) (c->x86_model)++; /* Emulate MTRRs using Cyrix's ARRs. */ set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); @@ -343,7 +341,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) break; } strcpy(buf, Cx86_model[dir0_msn & 7]); - if (p) strcat(buf, p); + if (p) + strcat(buf, p); return; } @@ -352,7 +351,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) */ static void __cpuinit init_nsc(struct cpuinfo_x86 *c) { - /* There may be GX1 processors in the wild that are branded + /* + * There may be GX1 processors in the wild that are branded * NSC and not Cyrix. * * This function only handles the GX processor, and kicks every @@ -377,7 +377,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c) * by the fact that they preserve the flags across the division of 5/2. * PII and PPro exhibit this behavior too, but they have cpuid available. */ - + /* * Perform the Cyrix 5/2 test. A Cyrix won't change * the flags, while other 486 chips will. 
@@ -398,27 +398,26 @@ static inline int test_cyrix_52div(void) return (unsigned char) (test >> 8) == 0x02; } -static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) +static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) { /* Detect Cyrix with disabled CPUID */ - if ( c->x86 == 4 && test_cyrix_52div() ) { + if (c->x86 == 4 && test_cyrix_52div()) { unsigned char dir0, dir1; - + strcpy(c->x86_vendor_id, "CyrixInstead"); - c->x86_vendor = X86_VENDOR_CYRIX; - - /* Actually enable cpuid on the older cyrix */ - - /* Retrieve CPU revisions */ - + c->x86_vendor = X86_VENDOR_CYRIX; + + /* Actually enable cpuid on the older cyrix */ + + /* Retrieve CPU revisions */ + do_cyrix_devid(&dir0, &dir1); - dir0>>=4; - + dir0 >>= 4; + /* Check it is an affected model */ - - if (dir0 == 5 || dir0 == 3) - { + + if (dir0 == 5 || dir0 == 3) { unsigned char ccr3; unsigned long flags; printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); @@ -434,7 +433,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { .c_vendor = "Cyrix", - .c_ident = { "CyrixInstead" }, + .c_ident = { "CyrixInstead" }, .c_init = init_cyrix, .c_identify = cyrix_identify, }; @@ -443,7 +442,7 @@ cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); static struct cpu_dev nsc_cpu_dev __cpuinitdata = { .c_vendor = "NSC", - .c_ident = { "Geode by NSC" }, + .c_ident = { "Geode by NSC" }, .c_init = init_nsc, }; -- cgit v1.2.1 From 0fb90297dc69b87333c0afae537b1bff847e0397 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:34 +0100 Subject: x86: coding style fixes to arch/x86/oprofile/nmi_timer_int.c Before: total: 3 errors, 0 warnings, 69 lines checked After: total: 0 errors, 0 warnings, 69 lines checked No code changed: arch/x86/oprofile/nmi_timer_int.o: text data bss dec hex filename 180 12 0 192 c0 nmi_timer_int.o.before 180 12 0 192 c0 nmi_timer_int.o.after md5: 0433c31d758e81da574e01722a8036ea nmi_timer_int.o.before.asm 0433c31d758e81da574e01722a8036ea nmi_timer_int.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/oprofile/nmi_timer_int.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c index 1418e36ae7ab..e3ecb71b5790 100644 --- a/arch/x86/oprofile/nmi_timer_int.c +++ b/arch/x86/oprofile/nmi_timer_int.c @@ -17,14 +17,14 @@ #include #include #include - + static int profile_timer_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; - switch(val) { + switch (val) { case DIE_NMI: oprofile_add_sample(args->regs, 0); ret = NOTIFY_STOP; @@ -56,7 +56,7 @@ static void timer_stop(void) } -int __init op_nmi_timer_init(struct oprofile_operations * ops) +int __init op_nmi_timer_init(struct oprofile_operations *ops) { if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0)) return -ENODEV; -- cgit v1.2.1 From 94a9fa41421625caedc328a2b0a0d5c78919a20f Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:52 +0100 Subject: x86: coding style fixes to arch/x86/kernel/msr.c Before: total: 2 errors, 0 warnings, 231 lines checked After: total: 0 errors, 0 warnings, 231 lines checked No code changed: arch/x86/kernel/msr.o: text data bss dec hex filename 1199 12 4 1215 4bf msr.o.before 1199 12 4 1215 4bf msr.o.after md5: 604be0d07d829bc52a9346babd084bdc msr.o.before.asm 604be0d07d829bc52a9346babd084bdc 
msr.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/msr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index af51ea8400b2..4dfb40530057 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -65,8 +65,8 @@ static loff_t msr_seek(struct file *file, loff_t offset, int orig) return ret; } -static ssize_t msr_read(struct file *file, char __user * buf, - size_t count, loff_t * ppos) +static ssize_t msr_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { u32 __user *tmp = (u32 __user *) buf; u32 data[2]; -- cgit v1.2.1 From 7ebed39ff7eec204850736a662828da0b942b8c0 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 22 Feb 2008 23:11:59 +0100 Subject: x86: coding style fixes to arch/x86/xen/multicalls.c Before: total: 2 errors, 2 warnings, 138 lines checked After: total: 0 errors, 2 warnings, 138 lines checked No code changed: arch/x86/xen/multicalls.o: text data bss dec hex filename 887 2832 0 3719 e87 multicalls.o.before 887 2832 0 3719 e87 multicalls.o.after md5: cf6d72d9db6dc5a3ebe01eec9f05e95f multicalls.o.before.asm cf6d72d9db6dc5a3ebe01eec9f05e95f multicalls.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/xen/multicalls.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 5e6f36f6d876..5791eb2e3750 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -76,7 +76,7 @@ void xen_mc_flush(void) if (ret) { printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", ret, smp_processor_id()); - for(i = 0; i < b->mcidx; i++) { + for (i = 0; i < b->mcidx; i++) { printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n", i+1, b->mcidx, b->debug[i].op, @@ -93,7 +93,7 @@ void xen_mc_flush(void) local_irq_restore(flags); - for(i = 0; i < b->cbidx; i++) { + for (i = 0; i < b->cbidx; i++) { struct callback *cb = &b->callbacks[i]; (*cb->fn)(cb->data); -- cgit v1.2.1 From db96598494f2a2ab9ab79999dd8fc365a906914b Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Sun, 24 Feb 2008 11:57:22 +0100 Subject: x86: coding style fixes to arch/x86/power/cpu_32.c Before: total: 15 errors, 3 warnings, 133 lines checked After: total: 0 errors, 0 warnings, 138 lines checked No code changed: arch/x86/power/cpu_32.o: text data bss dec hex filename 739 0 84 823 337 cpu_32.o.before 739 0 84 823 337 cpu_32.o.after md5: eb0726223a5e26b195e65f0ae2c0ec66 cpu_32.o.before.asm eb0726223a5e26b195e65f0ae2c0ec66 cpu_32.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/power/cpu_32.c | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 7f9c6da04a4c..7dc5d5cf50a2 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -27,17 +27,17 @@ static void __save_processor_state(struct saved_context *ctxt) /* * descriptor tables */ - store_gdt(&ctxt->gdt); - store_idt(&ctxt->idt); - store_tr(ctxt->tr); + store_gdt(&ctxt->gdt); + store_idt(&ctxt->idt); + store_tr(ctxt->tr); /* * segment registers */ - savesegment(es, ctxt->es); - savesegment(fs, ctxt->fs); - savesegment(gs, ctxt->gs); - savesegment(ss, ctxt->ss); + savesegment(es, ctxt->es); + savesegment(fs, ctxt->fs); + savesegment(gs, ctxt->gs); + savesegment(ss, ctxt->ss); /* * control registers @@ -48,10 +48,12 @@ static void __save_processor_state(struct 
saved_context *ctxt) ctxt->cr4 = read_cr4(); } +/* Needed by apm.c */ void save_processor_state(void) { __save_processor_state(&saved_context); } +EXPORT_SYMBOL(save_processor_state); static void do_fpu_end(void) { @@ -64,9 +66,14 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct * t = &per_cpu(init_tss, cpu); + struct tss_struct *t = &per_cpu(init_tss, cpu); - set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ + set_tss_desc(cpu, t); /* + * This just modifies memory; should not be + * necessary. But... This is necessary, because + * 386 hardware has concept of busy TSS or some + * similar stupidity. + */ load_TR_desc(); /* This does ltr */ load_LDT(¤t->active_mm->context); /* This does lldt */ @@ -100,16 +107,16 @@ static void __restore_processor_state(struct saved_context *ctxt) * now restore the descriptor tables to their proper values * ltr is done i fix_processor_context(). */ - load_gdt(&ctxt->gdt); - load_idt(&ctxt->idt); + load_gdt(&ctxt->gdt); + load_idt(&ctxt->idt); /* * segment registers */ - loadsegment(es, ctxt->es); - loadsegment(fs, ctxt->fs); - loadsegment(gs, ctxt->gs); - loadsegment(ss, ctxt->ss); + loadsegment(es, ctxt->es); + loadsegment(fs, ctxt->fs); + loadsegment(gs, ctxt->gs); + loadsegment(ss, ctxt->ss); /* * sysenter MSRs @@ -123,11 +130,9 @@ static void __restore_processor_state(struct saved_context *ctxt) mcheck_init(&boot_cpu_data); } +/* Needed by apm.c */ void restore_processor_state(void) { __restore_processor_state(&saved_context); } - -/* Needed by apm.c */ -EXPORT_SYMBOL(save_processor_state); EXPORT_SYMBOL(restore_processor_state); -- cgit v1.2.1 From 34048c9e927d5ae29c6ba802c826370de2a046d2 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Sun, 24 Feb 2008 11:58:13 +0100 Subject: x86: coding style fixes to arch/x86/kernel/cpu/common.c Before: total: 55 errors, 6 warnings, 727 lines checked After: total: 0 errors, 3 warnings, 734 lines checked No code changed: arch/x86/kernel/cpu/common.o: text data bss dec hex filename 3500 4611 44 8155 1fdb common.o.before 3500 4611 44 8155 1fdb common.o.after md5: e37091f11fbeb682c0db152ac3022a38 common.o.before.asm e37091f11fbeb682c0db152ac3022a38 common.o.after.asm Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 105 +++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 49 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0fd6be154d5d..ef9e31b89b35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -62,9 +62,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; static int cachesize_override __cpuinitdata = -1; static int disable_x86_serial_nr __cpuinitdata = 1; -struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; +struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; -static void __cpuinit default_init(struct cpuinfo_x86 * c) +static void __cpuinit default_init(struct cpuinfo_x86 *c) { /* Not much we can do here... 
*/
 	/* Check if at least it has cpuid */
@@ -81,11 +81,11 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
-	get_option (&str, &cachesize_override);
+	get_option(&str, &cachesize_override);
 	return 1;
 }
 __setup("cachesize=", cachesize_setup);
@@ -107,12 +107,12 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	/* Intel chips right-justify this string for some dumb reason;
 	   undo that brain damage */
 	p = q = &c->x86_model_id[0];
-	while ( *p == ' ' )
+	while (*p == ' ')
 		p++;
-	if ( p != q ) {
-		while ( *p )
+	if (p != q) {
+		while (*p)
 			*q++ = *p++;
-		while ( q <= &c->x86_model_id[48] )
+		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 
@@ -130,7 +130,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24)+(edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
@@ -138,16 +138,16 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	ecx = cpuid_ecx(0x80000006);
 	l2size = ecx >> 16;
-	
+
 	/* do processor-specific cache resizing */
 	if (this_cpu->c_size_cache)
-		l2size = this_cpu->c_size_cache(c,l2size);
+		l2size = this_cpu->c_size_cache(c, l2size);
 
 	/* Allow user to override all this if necessary. */
 	if (cachesize_override != -1)
 		l2size = cachesize_override;
 
-	if ( l2size == 0 )
+	if (l2size == 0)
 		return;		/* Again, no L2 cache is possible */
 
 	c->x86_cache_size = l2size;
@@ -156,16 +156,19 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	       l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
 
 /* Look up CPU names by table lookup. 
*/ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) { struct cpu_model_info *info; - if ( c->x86_model >= 16 ) + if (c->x86_model >= 16) return NULL; /* Range check */ if (!this_cpu) @@ -190,9 +193,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) for (i = 0; i < X86_VENDOR_NUM; i++) { if (cpu_devs[i]) { - if (!strcmp(v,cpu_devs[i]->c_ident[0]) || - (cpu_devs[i]->c_ident[1] && - !strcmp(v,cpu_devs[i]->c_ident[1]))) { + if (!strcmp(v, cpu_devs[i]->c_ident[0]) || + (cpu_devs[i]->c_ident[1] && + !strcmp(v, cpu_devs[i]->c_ident[1]))) { c->x86_vendor = i; if (!early) this_cpu = cpu_devs[i]; @@ -210,7 +213,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) } -static int __init x86_fxsr_setup(char * s) +static int __init x86_fxsr_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_FXSR); setup_clear_cpu_cap(X86_FEATURE_XMM); @@ -219,7 +222,7 @@ static int __init x86_fxsr_setup(char * s) __setup("nofxsr", x86_fxsr_setup); -static int __init x86_sep_setup(char * s) +static int __init x86_sep_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_SEP); return 1; @@ -308,12 +311,15 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) } -/* Do minimum CPU detection early. - Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. - The others are not touched to avoid unwanted side effects. - - WARNING: this function is only called on the BP. Don't add code here - that is supposed to run on all CPUs. */ +/* + * Do minimum CPU detection early. + * Fields really needed: vendor, cpuid_level, family, model, mask, + * cache alignment. + * The others are not touched to avoid unwanted side effects. + * + * WARNING: this function is only called on the BP. Don't add code here + * that is supposed to run on all CPUs. 
+ */ static void __init early_cpu_detect(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -335,7 +341,7 @@ static void __init early_cpu_detect(void) early_get_cap(c); } -static void __cpuinit generic_identify(struct cpuinfo_x86 * c) +static void __cpuinit generic_identify(struct cpuinfo_x86 *c) { u32 tfms, xlvl; unsigned int ebx; @@ -346,13 +352,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) (unsigned int *)&c->x86_vendor_id[0], (unsigned int *)&c->x86_vendor_id[8], (unsigned int *)&c->x86_vendor_id[4]); - + get_cpu_vendor(c, 0); /* Initialize the standard set of capabilities */ /* Note that the vendor-specific code below might override */ - /* Intel-defined flags: level 0x00000001 */ - if ( c->cpuid_level >= 0x00000001 ) { + if (c->cpuid_level >= 0x00000001) { u32 capability, excap; cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; @@ -378,12 +383,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); - if ( (xlvl & 0xffff0000) == 0x80000000 ) { - if ( xlvl >= 0x80000001 ) { + if ((xlvl & 0xffff0000) == 0x80000000) { + if (xlvl >= 0x80000001) { c->x86_capability[1] = cpuid_edx(0x80000001); c->x86_capability[6] = cpuid_ecx(0x80000001); } - if ( xlvl >= 0x80000004 ) + if (xlvl >= 0x80000004) get_model_name(c); /* Default name */ } @@ -397,12 +402,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { - if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { + if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { /* Disable processor serial number */ - unsigned long lo,hi; - rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); + unsigned long lo, hi; + rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); lo |= 0x200000; - wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); + wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); printk(KERN_NOTICE "CPU serial number disabled.\n"); clear_bit(X86_FEATURE_PN, c->x86_capability); @@ -439,9 +444,11 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) memset(&c->x86_capability, 0, sizeof c->x86_capability); if (!have_cpuid_p()) { - /* First of all, decide if this is a 486 or higher */ - /* It's a 486 if we can modify the AC flag */ - if ( flag_is_changeable_p(X86_EFLAGS_AC) ) + /* + * First of all, decide if this is a 486 or higher + * It's a 486 if we can modify the AC flag + */ + if (flag_is_changeable_p(X86_EFLAGS_AC)) c->x86 = 4; else c->x86 = 3; @@ -474,10 +481,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) */ /* If the model name is still unset, do table lookup. */ - if ( !c->x86_model_id[0] ) { + if (!c->x86_model_id[0]) { char *p; p = table_lookup_model(c); - if ( p ) + if (p) strcpy(c->x86_model_id, p); else /* Last resort... */ @@ -491,9 +498,9 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) * common between the CPUs. The first time this routine gets * executed, c == &boot_cpu_data. 
*/
-	if ( c != &boot_cpu_data ) {
+	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for ( i = 0 ; i < NCAPINTS ; i++ )
+		for (i = 0 ; i < NCAPINTS ; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -537,7 +544,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1 ) {
+	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
 			printk(KERN_WARNING "CPU: Unsupported number of the "
@@ -592,7 +599,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	else
 		printk("%s", c->x86_model_id);
 
-	if (c->x86_mask || c->cpuid_level >= 0)
+	if (c->x86_mask || c->cpuid_level >= 0)
 		printk(" stepping %02x\n", c->x86_mask);
 	else
 		printk("\n");
@@ -653,7 +660,7 @@ void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -679,7 +686,7 @@ void __cpuinit cpu_init(void)
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
-	set_tss_desc(cpu,t);
+	set_tss_desc(cpu, t);
 	load_TR_desc();
 	load_LDT(&init_mm.context);
--
cgit v1.2.1


From f8fffa458368ed3d57385698f775880db629bd1a Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Sun, 24 Feb 2008 21:36:28 -0800
Subject: x86: apic_is_clustered_box for vsmp

a quad-core, 8-socket system with APIC id lifting can end up with an
apic id range of [4, 0x23]. apic_is_clustered_box() then counts three
apic clusters, which is larger than 2, so the box is treated as a
clustered box and we get:

  Marking TSC unstable due to TSCs unsynchronized

even though the CPUs have X86_FEATURE_CONSTANT_TSC set.

the existing quick fix bails out early when the CPU is from AMD - but
vSMP boxes need the clustered-box check regardless of CPU vendor, so
make sure the AMD shortcut cannot bypass the vSMP detection.
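to see why the heuristic misfires, here is a minimal user-space sketch
(illustrative only, not kernel code; count_apic_clusters() is a made-up
helper) of the cluster counting that apic_is_clustered_box() performs,
assuming the cluster id is the upper nibble of the APIC id:

	#include <stdio.h>

	/* hypothetical stand-in for the kernel's clustermap bookkeeping */
	static int count_apic_clusters(unsigned int first_id, unsigned int last_id)
	{
		unsigned long seen = 0;		/* one bit per cluster id */
		unsigned int id;
		int clusters = 0;

		for (id = first_id; id <= last_id; id++) {
			unsigned int cluster = id >> 4;	/* upper nibble */

			if (!(seen & (1UL << cluster))) {
				seen |= 1UL << cluster;
				clusters++;
			}
		}
		return clusters;
	}

	int main(void)
	{
		/* lifted apic ids on a quad-core, 8-socket box */
		printf("%d\n", count_apic_clusters(0x4, 0x23));	/* prints 3 */
		return 0;
	}

ids [4, 0x23] span clusters 0, 1 and 2, so the "more than 2 clusters"
test fires even though the machine is a single box.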
Signed-off-by: Yinghai Lu
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/apic_64.c |  4 ++--
 arch/x86/kernel/vsmp_64.c | 27 +++++++++++++++++++++------
 include/asm-x86/apic.h    |  5 +++++
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index ac2405ed504d..f6eb01d8923a 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -1182,9 +1182,9 @@ __cpuinit int apic_is_clustered_box(void)
 	 * there is not this kind of box with AMD CPU yet.
 	 * Some AMD box with quadcore cpu and 8 sockets apicid
 	 * will be [4, 0x23] or [8, 0x27] could be thought to
-	 * have three apic_clusters. So go out early.
+	 * vsmp box still need checking...
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (!is_vsmp_box() && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		return 0;
 
 	bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 54202b1805da..a00961d42e75 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -72,19 +72,34 @@ static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 
 }
 
+static int vsmp = -1;
+
+int is_vsmp_box(void)
+{
+	if (vsmp != -1)
+		return vsmp;
+
+	vsmp = 0;
+	if (!early_pci_allowed())
+		return vsmp;
+
+	/* Check if we are running on a ScaleMP vSMP box */
+	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
+	    (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
+		vsmp = 1;
+
+	return vsmp;
+}
+
 void __init vsmp_init(void)
 {
 	void *address;
 	unsigned int cap, ctl, cfg;
 
-	if (!early_pci_allowed())
+	if (!is_vsmp_box())
 		return;
 
-	/* Check if we are running on a ScaleMP vSMP box */
-	if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) !=
-	     PCI_VENDOR_ID_SCALEMP) ||
-	    (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) !=
-	     PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
+	if (!early_pci_allowed())
 		return;
 
 	/* If we are, use the distinguished irq functions */
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index bcfc07fd3661..f0321a427e16 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -51,12 +51,17 @@ extern unsigned boot_cpu_id;
  */
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
+extern int is_vsmp_box(void);
 #else
 #define apic_write native_apic_write
 #define apic_write_atomic native_apic_write_atomic
 #define apic_read native_apic_read
 #define setup_boot_clock setup_boot_APIC_clock
 #define setup_secondary_clock setup_secondary_APIC_clock
+static int inline is_vsmp_box(void)
+{
+	return 0;
+}
 #endif
 
 static inline void native_apic_write(unsigned long reg, u32 v)
--
cgit v1.2.1


From 4edbfa789b32bebf0b7fb71d5b2913a91ac4d0b1 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Mon, 25 Feb 2008 11:44:07 +0100
Subject: x86, rdc321x: remove watchdog file

remove the arch/x86/mach-rdc321x/wdt.c file.

Signed-off-by: Florian Fainelli
Signed-off-by: Ingo Molnar
---
 arch/x86/mach-rdc321x/Makefile |   2 +-
 arch/x86/mach-rdc321x/wdt.c    | 275 -----------------------------------------
 2 files changed, 1 insertion(+), 276 deletions(-)
 delete mode 100644 arch/x86/mach-rdc321x/wdt.c

diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
index 1faac8125e3d..8325b4ca431c 100644
--- a/arch/x86/mach-rdc321x/Makefile
+++ b/arch/x86/mach-rdc321x/Makefile
@@ -1,5 +1,5 @@
 #
 # Makefile for the RDC321x specific parts of the kernel
 #
-obj-$(CONFIG_X86_RDC321X)	:= gpio.o platform.o wdt.o
+obj-$(CONFIG_X86_RDC321X)	:= gpio.o platform.o
 
diff --git a/arch/x86/mach-rdc321x/wdt.c b/arch/x86/mach-rdc321x/wdt.c
deleted file mode 100644
index ec5625ae7061..000000000000
--- a/arch/x86/mach-rdc321x/wdt.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * RDC321x watchdog driver
- *
- * Copyright (C) 2007 Florian Fainelli
- *
- * This driver is highly inspired from the cpu5_wdt driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define RDC_WDT_MASK 0x80000000 /* Mask */ -#define RDC_WDT_EN 0x00800000 /* Enable bit */ -#define RDC_WDT_WTI 0x00200000 /* Generate CPU reset/NMI/WDT on timeout */ -#define RDC_WDT_RST 0x00100000 /* Reset bit */ -#define RDC_WDT_WIF 0x00040000 /* WDT IRQ Flag */ -#define RDC_WDT_IRT 0x00000100 /* IRQ Routing table */ -#define RDC_WDT_CNT 0x00000001 /* WDT count */ - -#define RDC_CLS_TMR 0x80003844 /* Clear timer */ - -#define RDC_WDT_INTERVAL (HZ/10+1) - -int nowayout = WATCHDOG_NOWAYOUT; -module_param(nowayout, int, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); - -static int ticks = 1000; - -/* some device data */ - -static struct { - struct completion stop; - volatile int running; - struct timer_list timer; - volatile int queue; - int default_ticks; - unsigned long inuse; -} rdc321x_wdt_device; - -/* generic helper functions */ - -static void rdc321x_wdt_trigger(unsigned long unused) -{ - if (rdc321x_wdt_device.running) - ticks--; - - /* keep watchdog alive */ - outl(RDC_WDT_EN|inl(RDC3210_CFGREG_DATA), RDC3210_CFGREG_DATA); - - /* requeue?? */ - if (rdc321x_wdt_device.queue && ticks) - mod_timer(&rdc321x_wdt_device.timer, - jiffies + RDC_WDT_INTERVAL); - else { - /* ticks doesn't matter anyway */ - complete(&rdc321x_wdt_device.stop); - } - -} - -static void rdc321x_wdt_reset(void) -{ - ticks = rdc321x_wdt_device.default_ticks; -} - -static void rdc321x_wdt_start(void) -{ - if (!rdc321x_wdt_device.queue) { - rdc321x_wdt_device.queue = 1; - - /* Clear the timer */ - outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); - - /* Enable watchdog and set the timeout to 81.92 us */ - outl(RDC_WDT_EN|RDC_WDT_CNT, RDC3210_CFGREG_DATA); - - mod_timer(&rdc321x_wdt_device.timer, - jiffies + RDC_WDT_INTERVAL); - } - - /* if process dies, counter is not decremented */ - rdc321x_wdt_device.running++; -} - -static int rdc321x_wdt_stop(void) -{ - if (rdc321x_wdt_device.running) - rdc321x_wdt_device.running = 0; - - ticks = rdc321x_wdt_device.default_ticks; - - return -EIO; -} - -/* filesystem operations */ - -static int rdc321x_wdt_open(struct inode *inode, struct file *file) -{ - if (test_and_set_bit(0, &rdc321x_wdt_device.inuse)) - return -EBUSY; - - return nonseekable_open(inode, file); -} - -static int rdc321x_wdt_release(struct inode *inode, struct file *file) -{ - clear_bit(0, &rdc321x_wdt_device.inuse); - return 0; -} - -static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - unsigned int value; - static struct watchdog_info ident = { - .options = WDIOF_CARDRESET, - .identity = "RDC321x WDT", - }; - - switch (cmd) { - case WDIOC_KEEPALIVE: - rdc321x_wdt_reset(); - break; - case WDIOC_GETSTATUS: - /* Read the value from the DATA register */ - value = inl(RDC3210_CFGREG_DATA); - if (copy_to_user(argp, &value, sizeof(int))) - return -EFAULT; - break; - case WDIOC_GETSUPPORT: - if (copy_to_user(argp, &ident, sizeof(ident))) - return -EFAULT; - break; - case WDIOC_SETOPTIONS: - if (copy_from_user(&value, argp, 
sizeof(int))) - return -EFAULT; - switch (value) { - case WDIOS_ENABLECARD: - rdc321x_wdt_start(); - break; - case WDIOS_DISABLECARD: - return rdc321x_wdt_stop(); - default: - return -EINVAL; - } - break; - default: - return -ENOTTY; - } - return 0; -} - -static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - if (!count) - return -EIO; - - rdc321x_wdt_reset(); - - return count; -} - -static const struct file_operations rdc321x_wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .ioctl = rdc321x_wdt_ioctl, - .open = rdc321x_wdt_open, - .write = rdc321x_wdt_write, - .release = rdc321x_wdt_release, -}; - -static struct miscdevice rdc321x_wdt_misc = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &rdc321x_wdt_fops, -}; - -static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) -{ - int err; - - err = misc_register(&rdc321x_wdt_misc); - if (err < 0) { - printk(KERN_ERR PFX "watchdog misc_register failed\n"); - return err; - } - - /* Reset the watchdog */ - outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); - - init_completion(&rdc321x_wdt_device.stop); - rdc321x_wdt_device.queue = 0; - - clear_bit(0, &rdc321x_wdt_device.inuse); - - setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); - - rdc321x_wdt_device.default_ticks = ticks; - - printk(KERN_INFO PFX "watchdog init success\n"); - - return 0; -} - -static int rdc321x_wdt_remove(struct platform_device *pdev) -{ - if (rdc321x_wdt_device.queue) { - rdc321x_wdt_device.queue = 0; - wait_for_completion(&rdc321x_wdt_device.stop); - } - - misc_deregister(&rdc321x_wdt_misc); - - return 0; -} - -static struct platform_driver rdc321x_wdt_driver = { - .probe = rdc321x_wdt_probe, - .remove = rdc321x_wdt_remove, - .driver = { - .owner = THIS_MODULE, - .name = "rdc321x-wdt", - }, -}; - -static int __init rdc321x_wdt_init(void) -{ - return platform_driver_register(&rdc321x_wdt_driver); -} - -static void __exit rdc321x_wdt_exit(void) -{ - platform_driver_unregister(&rdc321x_wdt_driver); -} - -module_init(rdc321x_wdt_init); -module_exit(rdc321x_wdt_exit); - -MODULE_AUTHOR("Florian Fainelli "); -MODULE_DESCRIPTION("RDC321x watchdog driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); -- cgit v1.2.1 From d32de19ca9076eeed1e9bfd678e550fbad437535 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 25 Feb 2008 23:20:01 -0800 Subject: x86: remove wrong setting about CONSTANT_TSC for intel cpu early_init_intel() on 64-bit is introduced by commit 2b16a2353814a513cdb5c5c739b76a19d7ea39ce Author: Andi Kleen Date: Wed Jan 30 13:32:40 2008 +0100 x86: move X86_FEATURE_CONSTANT_TSC into early cpu feature detection sets CONSTANT_TSC for intel cpus - but it is already set in init_intel(). don't need to set that two times in early_init_intel() and init_intel(). this patch removes the init_intel() one. 
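for reference, a rough sketch of the check that now lives only in the
early path (hand-copied here as an illustration; the exact removal from
init_intel() is in the diff below):

	/*
	 * sketch of early_init_intel()'s existing check - P4 (family
	 * 0xf) from model 3 on, and Core (family 6) from model 0x0e on,
	 * have a TSC that ticks at a constant rate:
	 */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

since early_init_intel() runs before init_intel(), the second, identical
check in init_intel() could never add anything.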
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index e3cb3ea96ca1..164200257b68 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -866,9 +866,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; - if ((c->x86 == 0xf && c->x86_model >= 0x03) || - (c->x86 == 0x6 && c->x86_model >= 0x0e)) - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); -- cgit v1.2.1 From 0f8d2b926d15a68eac9c19edfdcb58a5d80b2960 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:34:21 +0100 Subject: x86: clean up cpu capabilities accesses introduce test_cpu_cap() for raw access to the real CPU capabilities as they are present in x86_capability. (cpu_has() will shortcut certain tests during build-time) Signed-off-by: Ingo Molnar --- include/asm-x86/cpufeature.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 065e92966c7c..1e3102eeb823 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -120,6 +120,9 @@ extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_power_flags[32]; +#define test_cpu_cap(c, bit) \ + test_bit(bit, (unsigned long *)((c)->x86_capability)) + #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && \ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ @@ -131,7 +134,8 @@ extern const char * const x86_power_flags[32]; (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ ? 
1 : \ - test_bit(bit, (unsigned long *)((c)->x86_capability))) + test_cpu_cap(c, bit)) + #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) -- cgit v1.2.1 From 9716951efd98ada69c417adddc85d9bbe1d7835a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:54:01 +0100 Subject: x86: clean up cpu capabilities accesses, generic Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/setup_64.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ef9e31b89b35..bd111ce8f605 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -374,7 +374,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) #else c->apicid = (ebx >> 24) & 0xFF; #endif - if (c->x86_capability[0] & (1<<19)) + if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; } else { /* Have CPUID level 0 only - unheard of */ diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 164200257b68..d65b73e63384 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -687,7 +687,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ - clear_bit(0*32+31, (unsigned long *)&c->x86_capability); + clear_cpu_cap(c, 0*32+31); /* On C+ stepping K8 rep microcode works well for copy/memset */ level = cpuid_eax(1); @@ -823,7 +823,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) - set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); } static void __cpuinit init_intel(struct cpuinfo_x86 *c) @@ -929,7 +929,7 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) c->x86 += (tfms >> 20) & 0xff; if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; - if (c->x86_capability[0] & (1<<19)) + if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; } else { /* Have CPUID level 0 only - unheard of */ -- cgit v1.2.1 From 16282a8e25f1783f296e5116dcef810a8e68d1a0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:49:57 +0100 Subject: x86: clean up cpu capabilities accesses, amd.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1a3e1bb4d758..33d38f8305ee 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -68,7 +68,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) if (cpuid_eax(0x80000000) >= 0x80000007) { c->x86_power = cpuid_edx(0x80000007); if (c->x86_power & (1<<8)) - set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); } } @@ -105,9 +105,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* * Bit 31 in normal CPUID used for nonstandard 3DNow ID; - * DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway + * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ - clear_bit(0*32+31, c->x86_capability); + clear_cpu_cap(c, 0*32+31); r = get_model_name(c); @@ -131,8 +131,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) if (c->x86_model < 6) { /* Based 
on AMD doc 20734R - June 2000 */ if (c->x86_model == 0) { - clear_bit(X86_FEATURE_APIC, c->x86_capability); - set_bit(X86_FEATURE_PGE, c->x86_capability); + clear_cpu_cap(c, X86_FEATURE_APIC); + set_cpu_cap(c, X86_FEATURE_PGE); } break; } @@ -208,7 +208,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* Set MTRR capability flag if appropriate */ if (c->x86_model == 13 || c->x86_model == 9 || (c->x86_model == 8 && c->x86_mask >= 8)) - set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_K6_MTRR); break; } @@ -231,7 +231,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) rdmsr(MSR_K7_HWCR, l, h); l &= ~0x00008000; wrmsr(MSR_K7_HWCR, l, h); - set_bit(X86_FEATURE_XMM, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_XMM); } } @@ -256,14 +256,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* Use K8 tuning for Fam10h and Fam11h */ case 0x10: case 0x11: - set_bit(X86_FEATURE_K8, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_K8); break; case 6: - set_bit(X86_FEATURE_K7, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_K7); break; } if (c->x86 >= 6) - set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); display_cacheinfo(c); @@ -304,10 +304,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* K6s reports MCEs but don't actually have all the MSRs */ if (c->x86 < 6) - clear_bit(X86_FEATURE_MCE, c->x86_capability); + clear_cpu_cap(c, X86_FEATURE_MCE); if (cpu_has_xmm2) - set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); } static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) -- cgit v1.2.1 From e1a94a974c2aa3c0a7c1a915c805211fb6773de1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:51:22 +0100 Subject: x86: clean up cpu capabilities accesses, centaur.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/centaur.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index efe8da88da53..e0f45edd6a55 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -282,12 +282,12 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) rdmsr(MSR_VIA_FCR, lo, hi); lo |= (1<<1 | 1<<7); wrmsr(MSR_VIA_FCR, lo, hi); - set_bit(X86_FEATURE_CX8, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CX8); } /* Before Nehemiah, the C3's had 3dNOW! */ if (c->x86_model >= 6 && c->x86_model < 9) - set_bit(X86_FEATURE_3DNOW, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_3DNOW); get_model_name(c); display_cacheinfo(c); @@ -327,7 +327,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ - clear_bit(0*32+31, c->x86_capability); + clear_cpu_cap(c, 0*32+31); switch (c->x86) { case 5: @@ -337,7 +337,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; fcr_clr = DPDC; printk(KERN_NOTICE "Disabling bugged TSC.\n"); - clear_bit(X86_FEATURE_TSC, c->x86_capability); + clear_cpu_cap(c, X86_FEATURE_TSC); #ifdef CONFIG_X86_OOSTORE centaur_create_optimal_mcr(); /* @@ -418,12 +418,12 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); } /* Emulate MTRRs using Centaur's MCR. 
*/ - set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); /* Report CX8 */ - set_bit(X86_FEATURE_CX8, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CX8); /* Set 3DNow! on Winchip 2 and above. */ if (c->x86_model >= 8) - set_bit(X86_FEATURE_3DNOW, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_3DNOW); /* See if we can find out some more. */ if (cpuid_eax(0x80000000) >= 0x80000005) { /* Yes, we can. */ -- cgit v1.2.1 From 4cbe668add030a35e0592a9bb292e0f2a1bcea88 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:51:32 +0100 Subject: x86: clean up cpu capabilities accesses, common.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index bd111ce8f605..57a46c36fa23 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -409,7 +409,7 @@ static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) lo |= 0x200000; wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); printk(KERN_NOTICE "CPU serial number disabled.\n"); - clear_bit(X86_FEATURE_PN, c->x86_capability); + clear_cpu_cap(c, X86_FEATURE_PN); /* Disabling the serial number may affect the cpuid level */ c->cpuid_level = cpuid_eax(0); -- cgit v1.2.1 From 1d007cd5aeea2c9283e01433dbce4c9f91dd7823 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:52:27 +0100 Subject: x86: clean up cpu capabilities accesses, cyrix.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cyrix.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index f7085bde4c28..3fd7a67bb06a 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -190,12 +190,12 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ - clear_bit(0*32+31, c->x86_capability); + clear_cpu_cap(c, 0*32+31); /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ - if (test_bit(1*32+24, c->x86_capability)) { - clear_bit(1*32+24, c->x86_capability); - set_bit(X86_FEATURE_CXMMX, c->x86_capability); + if (test_cpu_cap(c, 1*32+24)) { + clear_cpu_cap(c, 1*32+24); + set_cpu_cap(c, X86_FEATURE_CXMMX); } do_cyrix_devid(&dir0, &dir1); @@ -242,7 +242,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) } else /* 686 */ p = Cx86_cb+1; /* Emulate MTRRs using Cyrix's ARRs. */ - set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); /* 6x86's contain this bug */ c->coma_bug = 1; break; @@ -319,7 +319,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) (c->x86_model)++; /* Emulate MTRRs using Cyrix's ARRs. 
*/ - set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; case 0xf: /* Cyrix 486 without DEVID registers */ -- cgit v1.2.1 From d0e95ebdc5cf5fe6fa29f2e0a5c6a0fe5c5aa50f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:52:33 +0100 Subject: x86: clean up cpu capabilities in arch/x86/kernel/cpu/intel.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index c9ecf378cc41..fe9224c51d37 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -143,12 +143,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) - set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) - clear_bit(X86_FEATURE_SEP, c->x86_capability); + clear_cpu_cap(c, X86_FEATURE_SEP); /* * Names for the Pentium II/Celeron processors @@ -209,19 +209,19 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) #endif if (cpu_has_xmm2) - set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (c->x86 == 15) { - set_bit(X86_FEATURE_P4, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_P4); } if (c->x86 == 6) - set_bit(X86_FEATURE_P3, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_P3); if (cpu_has_ds) { unsigned int l1; rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & (1<<11))) - set_bit(X86_FEATURE_BTS, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_BTS); if (!(l1 & (1<<12))) - set_bit(X86_FEATURE_PEBS, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_PEBS); } if (cpu_has_bts) -- cgit v1.2.1 From 10cd5a1e5403d79a2d53425e6a4c8612e02ba973 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 08:52:39 +0100 Subject: x86: clean up cpu capabilities accesses, transmeta.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/transmeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index daee21d208f4..b911a2c61b8f 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -74,7 +74,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) wrmsr(0x80860004, cap_mask, uk); /* All Transmeta CPUs have a constant TSC */ - set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); #ifdef CONFIG_SYSCTL /* -- cgit v1.2.1 From b5964405fbc4fd4c57e0e1f86bc9f1b3dbfa040a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 26 Feb 2008 11:15:50 +0100 Subject: x86: clean up traps_32.c Before: total: 86 errors, 29 warnings, 1248 lines checked After: total: 0 errors, 17 warnings, 1281 lines checked No code changed: arch/x86/kernel/traps_32.o: text data bss dec hex filename 8711 2168 72 10951 2ac7 traps_32.o.before 8711 2168 72 10951 2ac7 traps_32.o.after (md5 sums differ because some stack offset positions changed.) 
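one of the things the patch below reflows is the DO_ERROR* macro family;
as a reading aid, here is roughly what a single instantiation such as
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) expands to (a
hand-expanded sketch, whitespace aside - the authoritative definition is
the macro in the diff):

	void do_invalid_TSS(struct pt_regs *regs, long error_code)
	{
		/* give notifier-chain users a chance to swallow the trap */
		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
				10, SIGSEGV) == NOTIFY_STOP)
			return;
		/* the vm86 argument is 0 for plain DO_ERROR traps */
		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
	}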
Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps_32.c | 604 ++++++++++++++++++++++++--------------------- 1 file changed, 318 insertions(+), 286 deletions(-) diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index b22c01e05a18..57a5704e3f6c 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -9,26 +9,28 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. */ -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include #include +#include +#include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include #ifdef CONFIG_EISA #include @@ -43,21 +45,18 @@ #include #endif +#include +#include #include -#include -#include -#include #include +#include +#include +#include #include #include #include -#include #include -#include -#include -#include - -#include +#include #include "mach_traps.h" @@ -69,7 +68,7 @@ EXPORT_SYMBOL_GPL(used_vectors); asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ -char ignore_fpu_irq = 0; +char ignore_fpu_irq; /* * The IDT has to be page-aligned to simplify the Pentium @@ -105,12 +104,13 @@ static unsigned int code_bytes = 64; void printk_address(unsigned long address, int reliable) { #ifdef CONFIG_KALLSYMS - unsigned long offset = 0, symsize; + char namebuf[KSYM_NAME_LEN]; + unsigned long offset = 0; + unsigned long symsize; const char *symname; - char *modname; - char *delim = ":"; - char namebuf[128]; char reliab[4] = ""; + char *delim = ":"; + char *modname; symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); @@ -138,13 +138,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned s /* The form of the top of the frame on the stack */ struct stack_frame { - struct stack_frame *next_frame; - unsigned long return_address; + struct stack_frame *next_frame; + unsigned long return_address; }; -static inline unsigned long print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data) +static inline unsigned long +print_context_stack(struct thread_info *tinfo, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data) { struct stack_frame *frame = (struct stack_frame *)bp; @@ -166,7 +167,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, return bp; } -#define MSG(msg) ops->warning(data, msg) +#define MSG(msg) ops->warning(data, msg) void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, @@ -177,6 +178,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (!stack) { unsigned long dummy; + stack = &dummy; if (task != current) stack = (unsigned long *)task->thread.sp; @@ -186,7 +188,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (!bp) { if (task == current) { /* Grab bp right from our regs */ - asm ("movl %%ebp, %0" : "=r" (bp) : ); + asm("movl %%ebp, %0" : "=r" (bp) :); } else { /* bp is the last reg pushed by switch_to */ bp = *(unsigned long *) task->thread.sp; @@ -196,15 +198,18 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, while (1) { struct thread_info *context; + context = (struct thread_info *) ((unsigned long)stack & (~(THREAD_SIZE - 1))); bp = 
print_context_stack(context, stack, bp, ops, data); - /* Should be after the line below, but somewhere - in early boot context comes out corrupted and we - can't reference it -AK */ + /* + * Should be after the line below, but somewhere + * in early boot context comes out corrupted and we + * can't reference it: + */ if (ops->stack(data, "IRQ") < 0) break; - stack = (unsigned long*)context->previous_esp; + stack = (unsigned long *)context->previous_esp; if (!stack) break; touch_nmi_watchdog(); @@ -243,15 +248,15 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, - .stack = print_trace_stack, - .address = print_trace_address, + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, + .address = print_trace_address, }; static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *stack, unsigned long bp, char *log_lvl) + unsigned long *stack, unsigned long bp, char *log_lvl) { dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); printk("%s =======================\n", log_lvl); @@ -263,21 +268,22 @@ void show_trace(struct task_struct *task, struct pt_regs *regs, show_trace_log_lvl(task, regs, stack, bp, ""); } -static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, - unsigned long *sp, unsigned long bp, char *log_lvl) +static void +show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, unsigned long bp, char *log_lvl) { unsigned long *stack; int i; if (sp == NULL) { if (task) - sp = (unsigned long*)task->thread.sp; + sp = (unsigned long *)task->thread.sp; else sp = (unsigned long *)&sp; } stack = sp; - for(i = 0; i < kstack_depth_to_print; i++) { + for (i = 0; i < kstack_depth_to_print; i++) { if (kstack_end(stack)) break; if (i && ((i % 8) == 0)) @@ -285,6 +291,7 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%08lx ", *stack++); } printk("\n%sCall Trace:\n", log_lvl); + show_trace_log_lvl(task, regs, sp, bp, log_lvl); } @@ -299,8 +306,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) */ void dump_stack(void) { - unsigned long stack; unsigned long bp = 0; + unsigned long stack; #ifdef CONFIG_FRAME_POINTER if (!bp) @@ -312,6 +319,7 @@ void dump_stack(void) init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); + show_trace(current, NULL, &stack, bp); } @@ -323,6 +331,7 @@ void show_registers(struct pt_regs *regs) print_modules(); __show_registers(regs, 0); + printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", TASK_COMM_LEN, current->comm, task_pid_nr(current), current_thread_info(), current, task_thread_info(current)); @@ -331,10 +340,10 @@ void show_registers(struct pt_regs *regs) * time of the fault.. 
*/
 	if (!user_mode_vm(regs)) {
-		u8 *ip;
 		unsigned int code_prologue = code_bytes * 43 / 64;
 		unsigned int code_len = code_bytes;
 		unsigned char c;
+		u8 *ip;
 
 		printk("\n" KERN_EMERG "Stack: ");
 		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
@@ -361,7 +370,7 @@ void show_registers(struct pt_regs *regs)
 		}
 	}
 	printk("\n");
-}
+}
 
 int is_valid_bugaddr(unsigned long ip)
 {
@@ -377,10 +386,10 @@ int is_valid_bugaddr(unsigned long ip)
 
 static int die_counter;
 
-int __kprobes __die(const char * str, struct pt_regs * regs, long err)
+int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 {
-	unsigned long sp;
 	unsigned short ss;
+	unsigned long sp;
 
 	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
@@ -395,8 +404,8 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
 	printk("\n");
 
 	if (notify_die(DIE_OOPS, str, regs, err,
-			current->thread.trap_no, SIGSEGV) !=
-	    NOTIFY_STOP) {
+			current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
+
 		show_registers(regs);
 		/* Executive summary in case the oops scrolled away */
 		sp = (unsigned long) (&regs->sp);
@@ -408,17 +417,18 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
 		printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
 		print_symbol("%s", regs->ip);
 		printk(" SS:ESP %04x:%08lx\n", ss, sp);
+
 		return 0;
-	} else {
-		return 1;
 	}
+
+	return 1;
 }
 
 /*
- * This is gone through when something in the kernel has done something bad and
- * is about to be terminated.
+ * This is gone through when something in the kernel has done something bad
+ * and is about to be terminated:
 */
-void die(const char * str, struct pt_regs * regs, long err)
+void die(const char *str, struct pt_regs *regs, long err)
 {
 	static struct {
 		raw_spinlock_t lock;
@@ -440,8 +450,9 @@ void die(const char * str, struct pt_regs * regs, long err)
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
-	} else
+	} else {
 		raw_local_irq_save(flags);
+	}
 
 	if (++die.lock_owner_depth < 3) {
 		report_bug(regs->ip, regs);
@@ -474,15 +485,16 @@ void die(const char * str, struct pt_regs * regs, long err)
 	do_exit(SIGSEGV);
 }
 
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+static inline void
+die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
 	if (!user_mode_vm(regs))
 		die(str, regs, err);
 }
 
-static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-			      struct pt_regs * regs, long error_code,
-			      siginfo_t *info)
+static void __kprobes
+do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
 {
 	struct task_struct *tsk = current;
 
@@ -495,111 +507,112 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
 	if (!user_mode(regs))
 		goto kernel_trap;
 
-	trap_signal: {
-		/*
-		 * We want error_code and trap_no set for userspace faults and
-		 * kernelspace faults which result in die(), but not
-		 * kernelspace faults which are fixed up. 
die() gives the + * process no chance to handle the signal and notice the + * kernel fault information, so that won't result in polluting + * the information about previously queued, but not yet + * delivered, faults. See also do_general_protection below. + */ + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; - if (info) - force_sig_info(signr, info, tsk); - else - force_sig(signr, tsk); - return; - } + if (info) + force_sig_info(signr, info, tsk); + else + force_sig(signr, tsk); + return; - kernel_trap: { - if (!fixup_exception(regs)) { - tsk->thread.error_code = error_code; - tsk->thread.trap_no = trapnr; - die(str, regs, error_code); - } - return; +kernel_trap: + if (!fixup_exception(regs)) { + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; + die(str, regs, error_code); } + return; - vm86_trap: { - int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr); - if (ret) goto trap_signal; - return; - } +vm86_trap: + if (handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, trapnr)) + goto trap_signal; + return; } -#define DO_ERROR(trapnr, signr, str, name) \ -void do_##name(struct pt_regs * regs, long error_code) \ -{ \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ - do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ +#define DO_ERROR(trapnr, signr, str, name) \ +void do_##name(struct pt_regs *regs, long error_code) \ +{ \ + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ + == NOTIFY_STOP) \ + return; \ + do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ } -#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \ -void do_##name(struct pt_regs * regs, long error_code) \ -{ \ - siginfo_t info; \ - if (irq) \ - local_irq_enable(); \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ - do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ +#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \ +void do_##name(struct pt_regs *regs, long error_code) \ +{ \ + siginfo_t info; \ + if (irq) \ + local_irq_enable(); \ + info.si_signo = signr; \ + info.si_errno = 0; \ + info.si_code = sicode; \ + info.si_addr = (void __user *)siaddr; \ + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ + == NOTIFY_STOP) \ + return; \ + do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ } -#define DO_VM86_ERROR(trapnr, signr, str, name) \ -void do_##name(struct pt_regs * regs, long error_code) \ -{ \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ - do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ +#define DO_VM86_ERROR(trapnr, signr, str, name) \ +void do_##name(struct pt_regs *regs, long error_code) \ +{ \ + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ + == NOTIFY_STOP) \ + return; \ + do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ } -#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ -void do_##name(struct pt_regs * regs, long error_code) \ -{ \ - siginfo_t info; \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ - trace_hardirqs_fixup(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ - return; \ - do_trap(trapnr, 
signr, str, 1, regs, error_code, &info);		\
 }
 
-DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
+DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
 #ifndef CONFIG_KPROBES
-DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+DO_VM86_ERROR(3, SIGTRAP, "int3", int3)
 #endif
-DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
-DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow)
+DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0)
+DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
 DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
 
-void __kprobes do_general_protection(struct pt_regs * regs,
-					      long error_code)
+void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
 {
-	int cpu = get_cpu();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-	struct thread_struct *thread = &current->thread;
+	struct thread_struct *thread;
+	struct tss_struct *tss;
+	int cpu;
+
+	cpu = get_cpu();
+	tss = &per_cpu(init_tss, cpu);
+	thread = &current->thread;
 
 	/*
 	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
@@ -616,14 +629,16 @@ void __kprobes do_general_protection(struct pt_regs * regs,
 		 * If the previously set map was extending to higher ports
 		 * than the current one, pad extra space with 0xff (no access).
 		 */
-		if (thread->io_bitmap_max < tss->io_bitmap_max)
+		if (thread->io_bitmap_max < tss->io_bitmap_max) {
 			memset((char *) tss->io_bitmap +
 				thread->io_bitmap_max, 0xff,
 				tss->io_bitmap_max - thread->io_bitmap_max);
+		}
 		tss->io_bitmap_max = thread->io_bitmap_max;
 		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		tss->io_bitmap_owner = thread;
 		put_cpu();
+
 		return;
 	}
 	put_cpu();
@@ -636,6 +651,7 @@ void __kprobes do_general_protection(struct pt_regs * regs,
 
 	current->thread.error_code = error_code;
 	current->thread.trap_no = 13;
+
 	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(KERN_INFO
@@ -666,21 +682,24 @@ gp_in_kernel:
 }
 
 static __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs * regs)
+mem_parity_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
-		"CPU %d.\n", reason, smp_processor_id());
-	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
+	printk(KERN_EMERG
+		"Uhhuh. 
NMI received for unknown reason %02x on CPU %d.\n", + reason, smp_processor_id()); + + printk(KERN_EMERG + "You have some hardware problem, likely on the PCI bus.\n"); #if defined(CONFIG_EDAC) - if(edac_handler_set()) { + if (edac_handler_set()) { edac_atomic_assert_error(); return; } #endif if (panic_on_unrecovered_nmi) - panic("NMI: Not continuing"); + panic("NMI: Not continuing"); printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); @@ -689,7 +708,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs) } static __kprobes void -io_check_error(unsigned char reason, struct pt_regs * regs) +io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; @@ -699,28 +718,35 @@ io_check_error(unsigned char reason, struct pt_regs * regs) /* Re-enable the IOCK line, wait for a few seconds */ reason = (reason & 0xf) | 8; outb(reason, 0x61); + i = 2000; - while (--i) udelay(1000); + while (--i) + udelay(1000); + reason &= ~8; outb(reason, 0x61); } static __kprobes void -unknown_nmi_error(unsigned char reason, struct pt_regs * regs) +unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { #ifdef CONFIG_MCA - /* Might actually be able to figure out what the guilty party - * is. */ - if( MCA_bus ) { + /* + * Might actually be able to figure out what the guilty party + * is: + */ + if (MCA_bus) { mca_handle_nmi(); return; } #endif - printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on " - "CPU %d.\n", reason, smp_processor_id()); + printk(KERN_EMERG + "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", + reason, smp_processor_id()); + printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); if (panic_on_unrecovered_nmi) - panic("NMI: Not continuing"); + panic("NMI: Not continuing"); printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); } @@ -729,14 +755,13 @@ static DEFINE_SPINLOCK(nmi_print_lock); void __kprobes die_nmi(struct pt_regs *regs, const char *msg) { - if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == - NOTIFY_STOP) + if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP) return; spin_lock(&nmi_print_lock); /* * We are in trouble anyway, lets at least try - * to get a message out. + * to get a message out: */ bust_spinlocks(1); printk(KERN_EMERG "%s", msg); @@ -747,9 +772,10 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg) spin_unlock(&nmi_print_lock); bust_spinlocks(0); - /* If we are in kernel we are probably nested up pretty bad - * and might aswell get out now while we still can. - */ + /* + * If we are in kernel we are probably nested up pretty bad + * and might aswell get out now while we still can: + */ if (!user_mode_vm(regs)) { current->thread.trap_no = 2; crash_kexec(regs); @@ -758,14 +784,14 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg) do_exit(SIGSEGV); } -static __kprobes void default_do_nmi(struct pt_regs * regs) +static __kprobes void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; - /* Only the BSP gets external NMIs from the system. 
*/ + /* Only the BSP gets external NMIs from the system: */ if (!smp_processor_id()) reason = get_nmi_reason(); - + if (!(reason & 0xc0)) { if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) == NOTIFY_STOP) @@ -778,8 +804,10 @@ static __kprobes void default_do_nmi(struct pt_regs * regs) if (nmi_watchdog_tick(regs, reason)) return; if (!do_nmi_callback(regs, smp_processor_id())) -#endif unknown_nmi_error(reason, regs); +#else + unknown_nmi_error(reason, regs); +#endif return; } @@ -791,14 +819,14 @@ static __kprobes void default_do_nmi(struct pt_regs * regs) io_check_error(reason, regs); /* * Reassert NMI in case it became active meanwhile - * as it's edge-triggered. + * as it's edge-triggered: */ reassert_nmi(); } static int ignore_nmis; -__kprobes void do_nmi(struct pt_regs * regs, long error_code) +__kprobes void do_nmi(struct pt_regs *regs, long error_code) { int cpu; @@ -834,9 +862,12 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) return; - /* This is an interrupt gate, because kprobes wants interrupts - disabled. Normal trap handlers don't. */ + /* + * This is an interrupt gate, because kprobes wants interrupts + * disabled. Normal trap handlers don't. + */ restore_interrupts(regs); + do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL); } #endif @@ -851,7 +882,7 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) * from user space. Such code must not hold kernel locks (since it * can equally take a page fault), therefore it is safe to call * force_sig_info even though that claims and releases locks. - * + * * Code in ./signal.c ensures that the debug control register * is restored before we deliver any signal, and therefore that * user code runs with the correct debug control register even though @@ -863,10 +894,10 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) * find every occurrence of the TF bit that could be saved away even * by user code) */ -void __kprobes do_debug(struct pt_regs * regs, long error_code) +void __kprobes do_debug(struct pt_regs *regs, long error_code) { - unsigned int condition; struct task_struct *tsk = current; + unsigned int condition; trace_hardirqs_fixup(); @@ -914,7 +945,8 @@ void __kprobes do_debug(struct pt_regs * regs, long error_code) /* Ok, finally something we can handle */ send_sigtrap(tsk, regs, error_code); - /* Disable additional traps. They'll be re-enabled when + /* + * Disable additional traps. They'll be re-enabled when * the signal is delivered. */ clear_dr7: @@ -938,9 +970,10 @@ clear_TF_reenable: */ void math_error(void __user *ip) { - struct task_struct * task; + struct task_struct *task; + unsigned short cwd; + unsigned short swd; siginfo_t info; - unsigned short cwd, swd; /* * Save the info for the exception handler and clear the error. 
@@ -966,36 +999,36 @@ void math_error(void __user *ip) cwd = get_fpu_cwd(task); swd = get_fpu_swd(task); switch (swd & ~cwd & 0x3f) { - case 0x000: /* No unmasked exception */ - return; - default: /* Multiple exceptions */ - break; - case 0x001: /* Invalid Op */ - /* - * swd & 0x240 == 0x040: Stack Underflow - * swd & 0x240 == 0x240: Stack Overflow - * User must clear the SF bit (0x40) if set - */ - info.si_code = FPE_FLTINV; - break; - case 0x002: /* Denormalize */ - case 0x010: /* Underflow */ - info.si_code = FPE_FLTUND; - break; - case 0x004: /* Zero Divide */ - info.si_code = FPE_FLTDIV; - break; - case 0x008: /* Overflow */ - info.si_code = FPE_FLTOVF; - break; - case 0x020: /* Precision */ - info.si_code = FPE_FLTRES; - break; + case 0x000: /* No unmasked exception */ + return; + default: /* Multiple exceptions */ + break; + case 0x001: /* Invalid Op */ + /* + * swd & 0x240 == 0x040: Stack Underflow + * swd & 0x240 == 0x240: Stack Overflow + * User must clear the SF bit (0x40) if set + */ + info.si_code = FPE_FLTINV; + break; + case 0x002: /* Denormalize */ + case 0x010: /* Underflow */ + info.si_code = FPE_FLTUND; + break; + case 0x004: /* Zero Divide */ + info.si_code = FPE_FLTDIV; + break; + case 0x008: /* Overflow */ + info.si_code = FPE_FLTOVF; + break; + case 0x020: /* Precision */ + info.si_code = FPE_FLTRES; + break; } force_sig_info(SIGFPE, &info, task); } -void do_coprocessor_error(struct pt_regs * regs, long error_code) +void do_coprocessor_error(struct pt_regs *regs, long error_code) { ignore_fpu_irq = 1; math_error((void __user *)regs->ip); @@ -1003,9 +1036,9 @@ void do_coprocessor_error(struct pt_regs * regs, long error_code) static void simd_math_error(void __user *ip) { - struct task_struct * task; - siginfo_t info; + struct task_struct *task; unsigned short mxcsr; + siginfo_t info; /* * Save the info for the exception handler and clear the error. @@ -1026,82 +1059,80 @@ static void simd_math_error(void __user *ip) */ mxcsr = get_fpu_mxcsr(task); switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { - case 0x000: - default: - break; - case 0x001: /* Invalid Op */ - info.si_code = FPE_FLTINV; - break; - case 0x002: /* Denormalize */ - case 0x010: /* Underflow */ - info.si_code = FPE_FLTUND; - break; - case 0x004: /* Zero Divide */ - info.si_code = FPE_FLTDIV; - break; - case 0x008: /* Overflow */ - info.si_code = FPE_FLTOVF; - break; - case 0x020: /* Precision */ - info.si_code = FPE_FLTRES; - break; + case 0x000: + default: + break; + case 0x001: /* Invalid Op */ + info.si_code = FPE_FLTINV; + break; + case 0x002: /* Denormalize */ + case 0x010: /* Underflow */ + info.si_code = FPE_FLTUND; + break; + case 0x004: /* Zero Divide */ + info.si_code = FPE_FLTDIV; + break; + case 0x008: /* Overflow */ + info.si_code = FPE_FLTOVF; + break; + case 0x020: /* Precision */ + info.si_code = FPE_FLTRES; + break; } force_sig_info(SIGFPE, &info, task); } -void do_simd_coprocessor_error(struct pt_regs * regs, - long error_code) +void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { if (cpu_has_xmm) { /* Handle SIMD FPU exceptions on PIII+ processors. */ ignore_fpu_irq = 1; simd_math_error((void __user *)regs->ip); - } else { - /* - * Handle strange cache flush from user space exception - * in all other cases. This is undocumented behaviour. 
- */ - if (regs->flags & VM_MASK) { - handle_vm86_fault((struct kernel_vm86_regs *)regs, - error_code); - return; - } - current->thread.trap_no = 19; - current->thread.error_code = error_code; - die_if_kernel("cache flush denied", regs, error_code); - force_sig(SIGSEGV, current); + return; + } + /* + * Handle strange cache flush from user space exception + * in all other cases. This is undocumented behaviour. + */ + if (regs->flags & VM_MASK) { + handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code); + return; } + current->thread.trap_no = 19; + current->thread.error_code = error_code; + die_if_kernel("cache flush denied", regs, error_code); + force_sig(SIGSEGV, current); } -void do_spurious_interrupt_bug(struct pt_regs * regs, - long error_code) +void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) { #if 0 /* No need to warn about this any longer. */ - printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); + printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); #endif } -unsigned long patch_espfix_desc(unsigned long uesp, - unsigned long kesp) +unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) { struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt; unsigned long base = (kesp - uesp) & -THREAD_SIZE; unsigned long new_kesp = kesp - base; unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; + /* Set up base for espfix segment */ - desc &= 0x00f0ff0000000000ULL; - desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | + desc &= 0x00f0ff0000000000ULL; + desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | ((((__u64)base) << 32) & 0xff00000000000000ULL) | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | (lim_pages & 0xffff); *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; + return new_kesp; } /* - * 'math_state_restore()' saves the current math information in the + * 'math_state_restore()' saves the current math information in the * old math state array, and gets the new ones from the current task * * Careful.. There are problems with IBM-designed IRQ13 behaviour. 
@@ -1115,7 +1146,7 @@ asmlinkage void math_state_restore(void) struct thread_info *thread = current_thread_info(); struct task_struct *tsk = thread->task; - clts(); /* Allow maths ops (or we recurse) */ + clts(); /* Allow maths ops (or we recurse) */ if (!tsk_used_math(tsk)) init_fpu(tsk); restore_fpu(tsk); @@ -1128,53 +1159,52 @@ EXPORT_SYMBOL_GPL(math_state_restore); asmlinkage void math_emulate(long arg) { - printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n"); - printk(KERN_EMERG "killing %s.\n",current->comm); - force_sig(SIGFPE,current); + printk(KERN_EMERG + "math-emulation not enabled and no coprocessor found.\n"); + printk(KERN_EMERG "killing %s.\n", current->comm); + force_sig(SIGFPE, current); schedule(); } #endif /* CONFIG_MATH_EMULATION */ - void __init trap_init(void) { int i; #ifdef CONFIG_EISA void __iomem *p = early_ioremap(0x0FFFD9, 4); - if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) { + + if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) EISA_bus = 1; - } early_iounmap(p, 4); #endif #ifdef CONFIG_X86_LOCAL_APIC init_apic_mappings(); #endif - - set_trap_gate(0,÷_error); - set_intr_gate(1,&debug); - set_intr_gate(2,&nmi); + set_trap_gate(0, ÷_error); + set_intr_gate(1, &debug); + set_intr_gate(2, &nmi); set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ - set_system_gate(4,&overflow); - set_trap_gate(5,&bounds); - set_trap_gate(6,&invalid_op); - set_trap_gate(7,&device_not_available); - set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); - set_trap_gate(9,&coprocessor_segment_overrun); - set_trap_gate(10,&invalid_TSS); - set_trap_gate(11,&segment_not_present); - set_trap_gate(12,&stack_segment); - set_trap_gate(13,&general_protection); - set_intr_gate(14,&page_fault); - set_trap_gate(15,&spurious_interrupt_bug); - set_trap_gate(16,&coprocessor_error); - set_trap_gate(17,&alignment_check); + set_system_gate(4, &overflow); + set_trap_gate(5, &bounds); + set_trap_gate(6, &invalid_op); + set_trap_gate(7, &device_not_available); + set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); + set_trap_gate(9, &coprocessor_segment_overrun); + set_trap_gate(10, &invalid_TSS); + set_trap_gate(11, &segment_not_present); + set_trap_gate(12, &stack_segment); + set_trap_gate(13, &general_protection); + set_intr_gate(14, &page_fault); + set_trap_gate(15, &spurious_interrupt_bug); + set_trap_gate(16, &coprocessor_error); + set_trap_gate(17, &alignment_check); #ifdef CONFIG_X86_MCE - set_trap_gate(18,&machine_check); + set_trap_gate(18, &machine_check); #endif - set_trap_gate(19,&simd_coprocessor_error); + set_trap_gate(19, &simd_coprocessor_error); /* * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. @@ -1187,21 +1217,22 @@ void __init trap_init(void) printk("done.\n"); } if (cpu_has_xmm) { - printk(KERN_INFO "Enabling unmasked SIMD FPU exception " - "support... "); + printk(KERN_INFO + "Enabling unmasked SIMD FPU exception support... "); set_in_cr4(X86_CR4_OSXMMEXCPT); printk("done.\n"); } - set_system_gate(SYSCALL_VECTOR,&system_call); + set_system_gate(SYSCALL_VECTOR, &system_call); - /* Reserve all the builtin and the syscall vector. */ + /* Reserve all the builtin and the syscall vector: */ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) set_bit(i, used_vectors); + set_bit(SYSCALL_VECTOR, used_vectors); /* - * Should be a barrier for any external CPU state. 
+ * Should be a barrier for any external CPU state: */ cpu_init(); @@ -1211,6 +1242,7 @@ void __init trap_init(void) static int __init kstack_setup(char *s) { kstack_depth_to_print = simple_strtoul(s, NULL, 0); + return 1; } __setup("kstack=", kstack_setup); -- cgit v1.2.1 From 11ae9dd48128790d3d2ece6bc916c001b4a1d147 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 26 Feb 2008 13:23:32 +0300 Subject: x86: switch to proc_create() Signed-off-by: Alexey Dobriyan Signed-off-by: Ingo Molnar --- arch/x86/kernel/apm_32.c | 5 +---- arch/x86/kernel/cpu/mtrr/if.c | 7 +++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index d4438ef296d8..f0030a0999c7 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -2217,7 +2217,6 @@ static struct dmi_system_id __initdata apm_dmi_table[] = { */ static int __init apm_init(void) { - struct proc_dir_entry *apm_proc; struct desc_struct *gdt; int err; @@ -2322,9 +2321,7 @@ static int __init apm_init(void) set_base(gdt[APM_DS >> 3], __va((unsigned long)apm_info.bios.dseg << 4)); - apm_proc = create_proc_entry("apm", 0, NULL); - if (apm_proc) - apm_proc->proc_fops = &apm_file_ops; + proc_create("apm", 0, NULL, &apm_file_ops); kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 91e150acb46c..1960f1985e5e 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -424,11 +424,10 @@ static int __init mtrr_if_init(void) return -ENODEV; proc_root_mtrr = - create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root); - if (proc_root_mtrr) { + proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops); + + if (proc_root_mtrr) proc_root_mtrr->owner = THIS_MODULE; - proc_root_mtrr->proc_fops = &mtrr_fops; - } return 0; } -- cgit v1.2.1 From 78a9909aab54123c7c471022389b36972e13b48e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 17 Apr 2008 17:40:51 +0200 Subject: x86, tracing: add notrace to asm-x86/linkage.h notrace signals that a function should not be traced. Most of the time this is used by tracers to annotate code that cannot be traced - it's in a volatile state (such as in user vdso context or NMI context) or it's in the tracer internals. Signed-off-by: Ingo Molnar --- include/asm-x86/linkage.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index c048353f4b85..64e444f8e85b 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h @@ -1,6 +1,9 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H +#undef notrace +#define notrace __attribute__((no_instrument_function)) + #ifdef CONFIG_X86_64 #define __ALIGN .p2align 4,,15 #define __ALIGN_STR ".p2align 4,,15" -- cgit v1.2.1 From b8c2d3dfbc117dff26058fbac316b8acfc2cb5f7 Mon Sep 17 00:00:00 2001 From: Markus Armbruster Date: Wed, 27 Feb 2008 14:56:35 +0100 Subject: xen: make hvc0 the preferred console in domU This makes the Xen console just work. 
Before, you had to ask for it on the kernel command line with console=hvc0. Signed-off-by: Markus Armbruster Signed-off-by: Ingo Molnar --- arch/x86/xen/enlighten.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 27ee26aedf94..198db49106b2 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -1228,6 +1229,9 @@ asmlinkage void __init xen_start_kernel(void) ? __pa(xen_start_info->mod_start) : 0; boot_params.hdr.ramdisk_size = xen_start_info->mod_len; + if (!is_initial_xendomain()) + add_preferred_console("hvc", 0, NULL); + /* Start the world */ start_kernel(); } -- cgit v1.2.1 From e3100c82abd9aa643dc15828202aceeae3504e03 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 27 Feb 2008 20:57:40 +0100 Subject: x86: check physical address range in ioremap Roland Dreier reported in http://lkml.org/lkml/2008/2/27/194 [ 8425.915139] BUG: unable to handle kernel paging request at ffffc20001a0a000 [ 8425.919087] IP: [] clflush_cache_range+0xc/0x25 [ 8425.919087] PGD 1bf80e067 PUD 1bf80f067 PMD 1bb497067 PTE 80000047000ee17b This is on an Intel machine with a 36-bit physical address space. The PTE entry references 47000ee000, which is outside of it. Add a check for the physical address space and warn/printk about the stupid caller. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 868bbde74698..17f518839028 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -35,6 +35,18 @@ unsigned long __phys_addr(unsigned long x) } EXPORT_SYMBOL(__phys_addr); +static inline int phys_addr_valid(unsigned long addr) +{ + return addr < (1UL << boot_cpu_data.x86_phys_bits); +} + +#else + +static inline int phys_addr_valid(unsigned long addr) +{ + return 1; +} + #endif int page_is_ram(unsigned long pagenr) @@ -118,6 +130,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, if (!size || last_addr < phys_addr) return NULL; + if (!phys_addr_valid(phys_addr)) { + printk(KERN_WARNING "ioremap: invalid physical address %lx\n", + phys_addr); + WARN_ON_ONCE(1); + return NULL; + } + /* * Don't remap the low PCI/ISA area, it's always mapped.. */ -- cgit v1.2.1 From b089c12b25284a5e31ede7c98936a2b36a41e090 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 27 Feb 2008 13:16:30 -0800 Subject: x86: X86_HT always enable on X86_64 SMP X86_HT is used for hyperthreading or multicore on 32-bit. X86_HT on 64-bit is different from 32-bit: it means hyperthreading only. And X86_HT is not used on 64-bit except in cpu/intel_cacheinfo.c. Unify X86_HT for hyperthreading or multicore. Turn X86_HT on when X86_64 and SMP are enabled. 
Signed-off-by: Hiroshi Shimamoto Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ed7f72b39113..810248f01729 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -171,7 +171,7 @@ config X86_64_SMP config X86_HT bool depends on SMP - depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8) + depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || X86_64 default y config X86_BIOS_REBOOT @@ -508,7 +508,7 @@ config NR_CPUS config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" - depends on (X86_64 && SMP) || (X86_32 && X86_HT) + depends on X86_HT help SMT scheduler support improves the CPU scheduler's decision making when dealing with Intel Pentium 4 chips with HyperThreading at a @@ -518,7 +518,7 @@ config SCHED_SMT config SCHED_MC def_bool y prompt "Multi-core scheduler support" - depends on (X86_64 && SMP) || (X86_32 && X86_HT) + depends on X86_HT help Multi-core scheduler support improves the CPU scheduler's decision making when dealing with multi-core CPU chips at a cost of slightly -- cgit v1.2.1 From bdd3cee2e4b7279457139058615ced6c2b41e7de Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 Feb 2008 14:10:49 +0100 Subject: x86: ioremap(), extend check to all RAM pages Suggested by Jan Beulich. Signed-off-by: Ingo Molnar Acked-by: Jan Beulich --- arch/x86/mm/ioremap.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 17f518839028..e5608d380176 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -146,8 +146,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, /* * Don't allow anybody to remap normal RAM that we're using.. */ - for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped && - (pfn << PAGE_SHIFT) < last_addr; pfn++) { + for (pfn = phys_addr >> PAGE_SHIFT; + (pfn << PAGE_SHIFT) < last_addr; pfn++) { + if (page_is_ram(pfn) && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) return NULL; -- cgit v1.2.1 From 3f50dbc1aec96c4d66ffa1c564014f9f43fb9e11 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 29 Feb 2008 12:50:56 +0100 Subject: x86: coding style fixes to arch/x86/lib/usercopy_32.c Before: total: 63 errors, 2 warnings, 878 lines checked After: total: 0 errors, 2 warnings, 878 lines checked Compile tested, no change in the binary output: text data bss dec hex filename 3231 0 0 3231 c9f usercopy_32.o.after 3231 0 0 3231 c9f usercopy_32.o.before md5sum: 9f9a3eb43970359ae7cecfd1c9e7cf42 usercopy_32.o.after 9f9a3eb43970359ae7cecfd1c9e7cf42 usercopy_32.o.before Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/lib/usercopy_32.c | 122 ++++++++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e849b9998b0e..24e60944971a 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -1,4 +1,4 @@ -/* +/* * User address space access functions. * The non inlined parts of asm-i386/uaccess.h are here. * @@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon #endif return 1; } -#define movsl_is_ok(a1,a2,n) \ - __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n)) +#define movsl_is_ok(a1, a2, n) \ + __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) /* * Copy a null terminated string from userspace. 
*/ -#define __do_strncpy_from_user(dst,src,count,res) \ +#define __do_strncpy_from_user(dst, src, count, res) \ do { \ int __d0, __d1, __d2; \ might_sleep(); \ @@ -61,7 +61,7 @@ do { \ * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. - * + * * Copies a NUL-terminated string from userspace to kernel space. * Caller must check the specified block with access_ok() before calling * this function. @@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user); * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. - * + * * Copies a NUL-terminated string from userspace to kernel space. * * On success, returns the length of the string (not including the trailing @@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user); do { \ int __d0; \ might_sleep(); \ - __asm__ __volatile__( \ + __asm__ __volatile__( \ "0: rep; stosl\n" \ " movl %2,%0\n" \ "1: rep; stosb\n" \ @@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __asm__ __volatile__( " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" + " cmpl $67, %0\n" + " jbe 2f\n" "1: movl 64(%4), %%eax\n" - " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" + " .align 2,0x90\n" + "2: movl 0(%4), %%eax\n" + "21: movl 4(%4), %%edx\n" + " movl %%eax, 0(%3)\n" + " movl %%edx, 4(%3)\n" + "3: movl 8(%4), %%eax\n" + "31: movl 12(%4),%%edx\n" + " movl %%eax, 8(%3)\n" " movl %%edx, 12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" @@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) "91: movl 60(%4), %%edx\n" " movl %%eax, 56(%3)\n" " movl %%edx, 60(%3)\n" - " addl $-64, %0\n" - " addl $64, %4\n" - " addl $64, %3\n" - " cmpl $63, %0\n" - " ja 0b\n" - "5: movl %0, %%eax\n" - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" - "6: rep; movsl\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 0b\n" + "5: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "6: rep; movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" - "8:\n" + "7: rep; movsb\n" + "8:\n" ".section .fixup,\"ax\"\n" - "9: lea 0(%%eax,%0,4),%0\n" - "16: pushl %0\n" - " pushl %%eax\n" + "9: lea 0(%%eax,%0,4),%0\n" + "16: pushl %0\n" + " pushl %%eax\n" " xorl %%eax,%%eax\n" - " rep; stosb\n" - " popl %%eax\n" - " popl %0\n" - " jmp 8b\n" - ".previous\n" + " rep; stosb\n" + " popl %%eax\n" + " popl %0\n" + " jmp 8b\n" + ".previous\n" ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,16b\n" + " .align 4\n" + " .long 0b,16b\n" " .long 1b,16b\n" " .long 2b,16b\n" " .long 21b,16b\n" - " .long 3b,16b\n" + " .long 3b,16b\n" " .long 31b,16b\n" - " .long 4b,16b\n" + " .long 4b,16b\n" " .long 41b,16b\n" " .long 10b,16b\n" " .long 51b,16b\n" @@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " .long 81b,16b\n" " .long 14b,16b\n" " .long 91b,16b\n" - " .long 6b,9b\n" - " .long 7b,16b\n" - ".previous" + " .long 6b,9b\n" + " .long 7b,16b\n" + ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); @@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) static unsigned 
long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size) { - int d0, d1; + int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" @@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, static unsigned long __copy_user_intel_nocache(void *to, const void __user *from, unsigned long size) { - int d0, d1; + int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" @@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to, #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. */ -#define __copy_user(to,from,size) \ +#define __copy_user(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ @@ -665,7 +665,7 @@ do { \ : "memory"); \ } while (0) -#define __copy_user_zeroing(to,from,size) \ +#define __copy_user_zeroing(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ @@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, { #ifndef CONFIG_X86_WP_WORKS_OK if (unlikely(boot_cpu_data.wp_works_ok == 0) && - ((unsigned long )to) < TASK_SIZE) { + ((unsigned long)to) < TASK_SIZE) { /* * When we are in an atomic section (see * mm/filemap.c:file_read_actor), return the full @@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, if (in_atomic()) return n; - /* + /* * CPU does not honor the WP bit when writing * from supervisory mode, and due to preemption or SMP, * the page tables can change at any time. * Do it manually. Manfred */ while (n) { - unsigned long offset = ((unsigned long)to)%PAGE_SIZE; + unsigned long offset = ((unsigned long)to)%PAGE_SIZE; unsigned long len = PAGE_SIZE - offset; int retval; struct page *pg; void *maddr; - + if (len > n) len = n; survive: down_read(¤t->mm->mmap_sem); retval = get_user_pages(current, current->mm, - (unsigned long )to, 1, 1, 0, &pg, NULL); + (unsigned long)to, 1, 1, 0, &pg, NULL); if (retval == -ENOMEM && is_global_init(current)) { up_read(¤t->mm->mmap_sem); @@ -750,8 +750,8 @@ survive: if (retval != 1) { up_read(¤t->mm->mmap_sem); - break; - } + break; + } maddr = kmap_atomic(pg, KM_USER0); memcpy(maddr + offset, from, len); @@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY - if ( n > 64 && cpu_has_xmm2) - n = __copy_user_zeroing_intel_nocache(to, from, n); + if (n > 64 && cpu_has_xmm2) + n = __copy_user_zeroing_intel_nocache(to, from, n); else __copy_user_zeroing(to, from, n); #else - __copy_user_zeroing(to, from, n); + __copy_user_zeroing(to, from, n); #endif return n; } @@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY - if ( n > 64 && cpu_has_xmm2) - n = __copy_user_intel_nocache(to, from, n); + if (n > 64 && cpu_has_xmm2) + n = __copy_user_intel_nocache(to, from, n); else __copy_user(to, from, n); #else - __copy_user(to, from, n); + __copy_user(to, from, n); #endif return n; } -- cgit v1.2.1 From e941f27a7a0f4ecac9ba8237b8a329bab4bd622f Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 29 Feb 2008 13:25:30 +0100 Subject: x86: coding style fixes to arch/x86/kernel/early_printk.c Before: total: 17 errors, 3 warnings, 254 lines checked After: total: 2 errors, 3 warnings, 254 lines checked paolo@paolo-desktop:/tmp/b$ md5sum * da32f5cd8f248970e4809e1005393e95 early_printk.o.after da32f5cd8f248970e4809e1005393e95 early_printk.o.before 
paolo@paolo-desktop:/tmp/b$ size * text data bss dec hex filename 1172 280 12 1464 5b8 early_printk.o.after 1172 280 12 1464 5b8 early_printk.o.before Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/early_printk.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index cff84cd9987f..a8f965dd58df 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -108,12 +108,12 @@ static __init void early_serial_init(char *s) if (*s) { unsigned port; - if (!strncmp(s,"0x",2)) { + if (!strncmp(s, "0x", 2)) { early_serial_base = simple_strtoul(s, &e, 16); } else { static int bases[] = { 0x3f8, 0x2f8 }; - if (!strncmp(s,"ttyS",4)) + if (!strncmp(s, "ttyS", 4)) s += 4; port = simple_strtoul(s, &e, 10); if (port > 1 || s == e) @@ -202,9 +202,9 @@ void early_printk(const char *fmt, ...) int n; va_list ap; - va_start(ap,fmt); - n = vscnprintf(buf,512,fmt,ap); - early_console->write(early_console,buf,n); + va_start(ap, fmt); + n = vscnprintf(buf, 512, fmt, ap); + early_console->write(early_console, buf, n); va_end(ap); } @@ -229,15 +229,15 @@ static int __init setup_early_printk(char *buf) early_serial_init(buf); early_console = &early_serial_console; } else if (!strncmp(buf, "vga", 3) - && boot_params.screen_info.orig_video_isVGA == 1) { + && boot_params.screen_info.orig_video_isVGA == 1) { max_xpos = boot_params.screen_info.orig_video_cols; max_ypos = boot_params.screen_info.orig_video_lines; current_ypos = boot_params.screen_info.orig_y; early_console = &early_vga_console; - } else if (!strncmp(buf, "simnow", 6)) { - simnow_init(buf + 6); - early_console = &simnow_console; - keep_early = 1; + } else if (!strncmp(buf, "simnow", 6)) { + simnow_init(buf + 6); + early_console = &simnow_console; + keep_early = 1; #ifdef CONFIG_HVC_XEN } else if (!strncmp(buf, "xen", 3)) { early_console = &xenboot_console; -- cgit v1.2.1 From c9cf39ae64a6c86872e580f921afec64ab9770f8 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 29 Feb 2008 13:26:56 +0100 Subject: x86: coding style fixes to x86/kernel/early_printk.c Depends on: [PATCH 2/3] x86: coding style fixes to arch/x86/kernel/early_printk.c Remove two: ERROR: do not initialise statics to 0 or NULL paolo@paolo-desktop:/tmp/c$ size * text data bss dec hex filename 1172 280 12 1464 5b8 early_printk.o.after 1172 280 12 1464 5b8 early_printk.o.before This patch changes the binary output: paolo@paolo-desktop:/tmp/c$ md5sum * dad9a9a881e0eeda62cc5645bd3d7cad early_printk.o.after da32f5cd8f248970e4809e1005393e95 early_printk.o.before because the two variables moved to another section. No change in functionality. 
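For illustration only (not part of the patch): C guarantees both forms of static below start out as zero; which section the variable lands in is a toolchain decision, and some compilers of that era placed an explicitly zero-initialized static in .data while an uninitialized one always goes to .bss. That is why only the section assignment (and hence the md5sum), not the behaviour, changes. A minimal standalone sketch with hypothetical names:

/* hypothetical standalone example, not kernel code */

/* explicit zero initializer: some toolchains place this in .data */
static int counter_data = 0;

/* no initializer: still guaranteed zero, always placed in .bss */
static int counter_bss;

int read_counters(void)
{
	/* both read as zero on first use; only the section differs */
	return counter_data + counter_bss;
}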
Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/early_printk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index a8f965dd58df..643fd861b724 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -13,7 +13,7 @@ #define VGABASE (__ISA_IO_base + 0xb8000) static int max_ypos = 25, max_xpos = 80; -static int current_ypos = 25, current_xpos = 0; +static int current_ypos = 25, current_xpos; static void early_vga_write(struct console *con, const char *str, unsigned n) { @@ -194,7 +194,7 @@ static struct console simnow_console = { /* Direct interface for emergencies */ static struct console *early_console = &early_vga_console; -static int early_console_initialized = 0; +static int early_console_initialized; void early_printk(const char *fmt, ...) { -- cgit v1.2.1 From 81e103f1f1bb0d35000f0e99626bf7abf864b486 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 17 Apr 2008 17:40:51 +0200 Subject: xen: use iret instruction all the time Change iret implementation to not be dependent on direct-access vcpu structure. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/xen/enlighten.c | 3 +-- arch/x86/xen/xen-asm.S | 11 +++-------- arch/x86/xen/xen-ops.h | 2 +- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 198db49106b2..c0388220cf97 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -890,7 +890,6 @@ void __init xen_setup_vcpu_info_placement(void) pv_irq_ops.irq_disable = xen_irq_disable_direct; pv_irq_ops.irq_enable = xen_irq_enable_direct; pv_mmu_ops.read_cr2 = xen_read_cr2_direct; - pv_cpu_ops.iret = xen_iret_direct; } } @@ -994,7 +993,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, - .iret = (void *)&hypercall_page[__HYPERVISOR_iret], + .iret = xen_iret, .irq_enable_syscall_ret = NULL, /* never called */ .load_tr_desc = paravirt_nop, diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 6b7190449d07..fe161ed4b01e 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -135,13 +135,8 @@ ENDPATCH(xen_restore_fl_direct) current stack state in whatever form its in, we keep things simple by only using a single register which is pushed/popped on the stack. - - Non-direct iret could be done in the same way, but it would - require an annoying amount of code duplication. We'll assume - that direct mode will be the common case once the hypervisor - support becomes commonplace. 
*/ -ENTRY(xen_iret_direct) +ENTRY(xen_iret) /* test eflags for special cases */ testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) jnz hyper_iret @@ -155,9 +150,9 @@ ENTRY(xen_iret_direct) GET_THREAD_INFO(%eax) movl TI_cpu(%eax),%eax movl __per_cpu_offset(,%eax,4),%eax - lea per_cpu__xen_vcpu_info(%eax),%eax + mov per_cpu__xen_vcpu(%eax),%eax #else - movl $per_cpu__xen_vcpu_info, %eax + movl per_cpu__xen_vcpu, %eax #endif /* check IF state we're restoring */ diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index b02a909bfd4c..956a491ea998 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -63,5 +63,5 @@ DECL_ASM(void, xen_irq_disable_direct, void); DECL_ASM(unsigned long, xen_save_fl_direct, void); DECL_ASM(void, xen_restore_fl_direct, unsigned long); -void xen_iret_direct(void); +void xen_iret(void); #endif /* XEN_OPS_H */ -- cgit v1.2.1 From 823c248e7cc75b4f22da914b01f8e5433cff197e Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Fri, 29 Feb 2008 05:09:02 +0100 Subject: x86: fix recursive dependencies The proper dependency check uncovered a few dependency problems: the subarchitectures used a mixture of select and depends on for SMP, and the PCI dependency was messed up. Signed-off-by: Roman Zippel Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 810248f01729..dbdd3142215c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -241,8 +241,7 @@ config X86_ELAN config X86_VOYAGER bool "Voyager (NCR)" - depends on X86_32 - select SMP if !BROKEN + depends on X86_32 && (SMP || BROKEN) help Voyager is an MCA-based 32-way capable SMP architecture proprietary to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. @@ -254,9 +253,8 @@ config X86_VOYAGER config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" - select SMP + depends on SMP && X86_32 select NUMA - depends on X86_32 help This option is used for getting Linux to run on a (IBM/Sequent) NUMA multiquad box. This changes the way that processors are bootstrapped, @@ -327,8 +325,8 @@ config X86_RDC321X config X86_VSMP bool "Support for ScaleMP vSMP" - depends on X86_64 && PCI select PARAVIRT + depends on X86_64 help Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is supposed to run on these EM64T-based machines. Only choose this option @@ -1380,7 +1378,7 @@ endmenu menu "Bus options (PCI etc.)" config PCI - bool "PCI support" if !X86_VISWS + bool "PCI support" if !X86_VISWS && !X86_VSMP depends on !X86_VOYAGER default y select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) -- cgit v1.2.1 From dedd04be71cea3d5adb14c8f674e801911c89a2f Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 1 Mar 2008 17:09:12 +0100 Subject: x86: reserve end-of-conventional-memory to 1MB on 32-bit This patch adds explicit detection of the EBDA and reservation of the ROM and adapter address space 0xa0000-0x100000 to the i386 kernels. Before this patch, the EBDA size was hardcoded as 4 KB. Also, the reservation of the adapter range used to be done by modifying the e820 map, which is no longer necessary; that code is removed from copy_e820_map. The amount of conventional memory and the start of the EBDA are detected by reading the BIOS data area directly. Paravirtual environments do not provide this area, so we bail out early in that case. They will just have to set up a correct memory map to start with. 
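Decoding the two BIOS data area words is just a shift in each case: the word at 0x40E is a real-mode segment, the word at 0x413 a kilobyte count. A minimal sketch for illustration, with hypothetical helper names (the actual kernel code is in the diff below); the example values in the comments assume a typical machine reporting 639 KB of conventional memory:

/* standalone sketch of the BDA decoding, not the kernel functions */
#define BIOS_EBDA_SEGMENT	0x40E	/* real-mode segment of the EBDA */
#define BIOS_LOWMEM_KILOBYTES	0x413	/* conventional memory size, in KB */

/* bda points at a mapping of physical page 0 (__va(0) in the kernel) */
static unsigned int example_ebda_start(const unsigned char *bda)
{
	/* e.g. segment 0x9fc0 -> physical address 0x9fc00 */
	unsigned short seg = *(const unsigned short *)(bda + BIOS_EBDA_SEGMENT);
	return (unsigned int)seg << 4;
}

static unsigned int example_lowmem_end(const unsigned char *bda)
{
	/* e.g. 639 KB -> 639 << 10 = 0x9fc00 bytes */
	unsigned short kb = *(const unsigned short *)(bda + BIOS_LOWMEM_KILOBYTES);
	return (unsigned int)kb << 10;
}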
Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820_32.c | 27 +++++-------------- arch/x86/kernel/setup_32.c | 64 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 58 insertions(+), 33 deletions(-) diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c index 80444c5c9b14..0240cd778365 100644 --- a/arch/x86/kernel/e820_32.c +++ b/arch/x86/kernel/e820_32.c @@ -450,38 +450,25 @@ int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) * thinkpad 560x, for example, does not cooperate with the memory * detection code.) */ -int __init copy_e820_map(struct e820entry * biosmap, int nr_map) +int __init copy_e820_map(struct e820entry *biosmap, int nr_map) { /* Only one memory region (or negative)? Ignore it */ if (nr_map < 2) return -1; do { - unsigned long long start = biosmap->addr; - unsigned long long size = biosmap->size; - unsigned long long end = start + size; - unsigned long type = biosmap->type; + u64 start = biosmap->addr; + u64 size = biosmap->size; + u64 end = start + size; + u32 type = biosmap->type; /* Overflow in 64 bits? Ignore the memory map. */ if (start > end) return -1; - /* - * Some BIOSes claim RAM in the 640k - 1M region. - * Not right. Fix it up. - */ - if (type == E820_RAM) { - if (start < 0x100000ULL && end > 0xA0000ULL) { - if (start < 0xA0000ULL) - add_memory_region(start, 0xA0000ULL-start, type); - if (end <= 0x100000ULL) - continue; - start = 0x100000ULL; - size = end - start; - } - } add_memory_region(start, size, type); - } while (biosmap++,--nr_map); + } while (biosmap++, --nr_map); + return 0; } diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index 2b3e5d45176b..14e293edd23f 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -385,15 +385,60 @@ unsigned long __init find_max_low_pfn(void) return max_low_pfn; } +#define BIOS_EBDA_SEGMENT 0x40E +#define BIOS_LOWMEM_KILOBYTES 0x413 + /* - * workaround for Dell systems that neglect to reserve EBDA + * The BIOS places the EBDA/XBDA at the top of conventional + * memory, and usually decreases the reported amount of + * conventional memory (int 0x12) too. This also contains a + * workaround for Dell systems that neglect to reserve EBDA. + * The same workaround also avoids a problem with the AMD768MPX + * chipset: reserve a page before VGA to prevent PCI prefetch + * into it (errata #56). Usually the page is reserved anyways, + * unless you have no PS/2 mouse plugged in. */ static void __init reserve_ebda_region(void) { - unsigned int addr; - addr = get_bios_ebda(); - if (addr) - reserve_bootmem(addr, PAGE_SIZE, BOOTMEM_DEFAULT); + unsigned int lowmem, ebda_addr; + + /* To determine the position of the EBDA and the */ + /* end of conventional memory, we need to look at */ + /* the BIOS data area. In a paravirtual environment */ + /* that area is absent. We'll just have to assume */ + /* that the paravirt case can handle memory setup */ + /* correctly, without our help. */ +#ifdef CONFIG_PARAVIRT + if ((boot_params.hdr.version >= 0x207) && + (boot_params.hdr.hardware_subarch != 0)) { + return; + } +#endif + + /* end of low (conventional) memory */ + lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); + lowmem <<= 10; + + /* start of EBDA area */ + ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); + ebda_addr <<= 4; + + /* Fixup: bios puts an EBDA in the top 64K segment */ + /* of conventional memory, but does not adjust lowmem. 
*/ + if ((lowmem - ebda_addr) <= 0x10000) + lowmem = ebda_addr; + + /* Fixup: bios does not report an EBDA at all. */ + /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ + if ((ebda_addr == 0) && (lowmem >= 0x9f000)) + lowmem = 0x9f000; + + /* Paranoia: should never happen, but... */ + if ((lowmem == 0) || (lowmem >= 0x100000)) + lowmem = 0x9f000; + + /* reserve all memory between lowmem and the 1MB mark */ + reserve_bootmem(lowmem, 0x100000 - lowmem, BOOTMEM_DEFAULT); } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -617,16 +662,9 @@ void __init setup_bootmem_allocator(void) */ reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT); - /* reserve EBDA region, it's a 4K region */ + /* reserve EBDA region */ reserve_ebda_region(); - /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent - PCI prefetch into it (errata #56). Usually the page is reserved anyways, - unless you have no PS/2 mouse plugged in. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 6) - reserve_bootmem(0xa0000 - 4096, 4096, BOOTMEM_DEFAULT); - #ifdef CONFIG_SMP /* * But first pinch a few for the stack/trampoline stuff -- cgit v1.2.1 From f6eb62b6924b99ec7da97fb6f554685a9ad6dce4 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Mon, 25 Feb 2008 19:07:51 +0100 Subject: x86: reserve_early end-of-conventional-memory to 1MB, 64-bit Explicitly reserve_early the whole address range from the end of conventional memory as reported by the BIOS data area up to the 1 MB mark. Regard the info retrieved from the BIOS data area with a bit of paranoia, though, because some BIOSes forget to register the EBDA correctly. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 45 +++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 38f32e798a99..b684552347df 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -49,33 +49,42 @@ static void __init copy_bootdata(char *real_mode_data) } } -#define EBDA_ADDR_POINTER 0x40E +#define BIOS_EBDA_SEGMENT 0x40E +#define BIOS_LOWMEM_KILOBYTES 0x413 +/* + * The BIOS places the EBDA/XBDA at the top of conventional + * memory, and usually decreases the reported amount of + * conventional memory (int 0x12) too. + */ static __init void reserve_ebda(void) { - unsigned ebda_addr, ebda_size; + unsigned int lowmem, ebda_addr; - /* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E - */ - ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER); + /* end of low (conventional) memory */ + lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); + lowmem <<= 10; + + /* start of EBDA area */ + ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); ebda_addr <<= 4; - if (!ebda_addr) - return; + /* Fixup: bios puts an EBDA in the top 64K segment */ + /* of conventional memory, but does not adjust lowmem. */ + if ((lowmem - ebda_addr) <= 0x10000) + lowmem = ebda_addr; - ebda_size = *(unsigned short *)__va(ebda_addr); + /* Fixup: bios does not report an EBDA at all. */ + /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ + if ((ebda_addr == 0) && (lowmem >= 0x9f000)) + lowmem = 0x9f000; - /* Round EBDA up to pages */ - if (ebda_size == 0) - ebda_size = 1; - ebda_size <<= 10; - ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE); - if (ebda_size > 64*1024) - ebda_size = 64*1024; + /* Paranoia: should never happen, but... 
*/ + if (lowmem >= 0x100000) + lowmem = 0xa0000; - reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA"); + /* reserve all memory between lowmem and the 1MB mark */ + reserve_early(lowmem, 0x100000, "BIOS reserved"); } void __init x86_64_start_kernel(char * real_mode_data) -- cgit v1.2.1 From 320a6b2efceccb652befca0b1c9a92d6e4256ef6 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 1 Mar 2008 17:12:43 +0100 Subject: x86: reserve end-of-conventional-memory to 1MB, 64-bit This patch is an add-on to the 64-bit ebda patch. It makes the functions reserve_ebda_region (renamed from reserve_ebda) and copy_e820_map equal to the 32-bit versions of the previous patch. Changes: Use u64 and u32 for local variables in copy_e820_map. The amount of conventional memory and the start of the EBDA are detected by reading the BIOS data area directly. Paravirtual environments do not provide this area, so we bail out early in that case. They will just have to set up a correct memory map to start with. Add a safety net for zeroed out BIOS data area. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820_64.c | 8 ++++---- arch/x86/kernel/head64.c | 28 +++++++++++++++++++++++----- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 8b914a833ac6..4a0953857cb2 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -621,10 +621,10 @@ static int __init copy_e820_map(struct e820entry *biosmap, int nr_map) return -1; do { - unsigned long start = biosmap->addr; - unsigned long size = biosmap->size; - unsigned long end = start + size; - unsigned long type = biosmap->type; + u64 start = biosmap->addr; + u64 size = biosmap->size; + u64 end = start + size; + u32 type = biosmap->type; /* Overflow in 64 bits? Ignore the memory map. */ if (start > end) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index b684552347df..269a6b481fe6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -55,12 +55,30 @@ static void __init copy_bootdata(char *real_mode_data) /* * The BIOS places the EBDA/XBDA at the top of conventional * memory, and usually decreases the reported amount of - * conventional memory (int 0x12) too. + * conventional memory (int 0x12) too. This also contains a + * workaround for Dell systems that neglect to reserve EBDA. + * The same workaround also avoids a problem with the AMD768MPX + * chipset: reserve a page before VGA to prevent PCI prefetch + * into it (errata #56). Usually the page is reserved anyways, + * unless you have no PS/2 mouse plugged in. */ -static __init void reserve_ebda(void) +static void __init reserve_ebda_region(void) { unsigned int lowmem, ebda_addr; + /* To determine the position of the EBDA and the */ + /* end of conventional memory, we need to look at */ + /* the BIOS data area. In a paravirtual environment */ + /* that area is absent. We'll just have to assume */ + /* that the paravirt case can handle memory setup */ + /* correctly, without our help. */ +#ifdef CONFIG_PARAVIRT + if ((boot_params.hdr.version >= 0x207) && + (boot_params.hdr.hardware_subarch != 0)) { + return; + } +#endif + /* end of low (conventional) memory */ lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); lowmem <<= 10; @@ -80,8 +98,8 @@ static __init void reserve_ebda(void) lowmem = 0x9f000; /* Paranoia: should never happen, but... 
*/ - if (lowmem >= 0x100000) - lowmem = 0xa0000; + if ((lowmem == 0) || (lowmem >= 0x100000)) + lowmem = 0x9f000; /* reserve all memory between lowmem and the 1MB mark */ reserve_early(lowmem, 0x100000, "BIOS reserved"); @@ -140,7 +158,7 @@ void __init x86_64_start_kernel(char * real_mode_data) reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); } - reserve_ebda(); + reserve_ebda_region(); /* * At this point everything still needed from the boot loader -- cgit v1.2.1 From ba748d221eb74b849453a94fdf0e1d0566b407d7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Mar 2008 09:37:41 +0100 Subject: x86: warn about RAM pages in ioremap() Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index e5608d380176..3d0a589d92c4 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -149,9 +149,11 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, for (pfn = phys_addr >> PAGE_SHIFT; (pfn << PAGE_SHIFT) < last_addr; pfn++) { - if (page_is_ram(pfn) && pfn_valid(pfn) && - !PageReserved(pfn_to_page(pfn))) + int is_ram = page_is_ram(pfn); + + if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) return NULL; + WARN_ON_ONCE(is_ram); } switch (mode) { -- cgit v1.2.1 From 40869cd038a0ecb867a7227aba46806224e4d11d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Mar 2008 13:55:32 +0100 Subject: x86: redo cded932b75ab0a5f9181e redo commit cded932b75ab0a5f9181e. Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable_32.h | 4 +++- include/asm-x86/pgtable_64.h | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 4e6a0fca0b47..997c36c6b4d7 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -90,7 +90,9 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad(x) ((pmd_val(x) \ + & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ + != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 6ef09914acbe..0a5081c98ae1 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -153,12 +153,14 @@ static inline unsigned long pgd_bad(pgd_t pgd) static inline unsigned long pud_bad(pud_t pud) { - return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pud_val(pud) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } static inline unsigned long pmd_bad(pmd_t pmd) { - return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); + return pmd_val(pmd) & + ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } #define pte_none(x) (!pte_val(x)) -- cgit v1.2.1 From 9fc34113f6880b215cbea4e7017fc818700384c2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Mar 2008 09:53:17 +0100 Subject: x86: debug pmd_bad() Signed-off-by: Ingo Molnar --- arch/x86/mm/pgtable_32.c | 7 +++++++ include/asm-x86/pgtable_32.h | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 2f9e9afcb9f4..76e4f4d26272 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ 
-381,3 +381,10 @@ void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) } #endif + +int pmd_bad(pmd_t pmd) +{ + WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd)); + + return pmd_bad_v1(pmd); +} diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 997c36c6b4d7..1e2c0d839528 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -90,7 +90,11 @@ extern unsigned long pg0[]; /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) \ + +extern int pmd_bad(pmd_t pmd); + +#define pmd_bad_v1(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pmd_bad_v2(x) ((pmd_val(x) \ & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ != _KERNPG_TABLE) -- cgit v1.2.1 From 3c2047cd32b1a8c782d7efab72707e7daa251625 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 4 Mar 2008 23:07:50 +1100 Subject: x86: if we cannot calibrate the TSC, we panic. The current tsc_init() clears the TSC feature bit if the TSC khz cannot be calculated, causing us to panic in arch/x86/kernel/cpu/bugs.c check_config(). We should simply mark it unstable. Frankly, someone should take an axe to this code. mark_tsc_unstable() not only marks it unstable, but sets tsc_enabled to 0, which seems redundant but is actually important here because it means it won't be used by sched_clock() either. Perhaps a tristate enum "UNUSABLE, UNSTABLE, OK" would be clearer, and separate mark_tsc_unstable() and mark_tsc_broken() functions? Signed-off-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc_32.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index c2241e04ea5f..68657d8526fb 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c @@ -392,13 +392,15 @@ void __init tsc_init(void) int cpu; if (!cpu_has_tsc) - goto out_no_tsc; + return; cpu_khz = calculate_cpu_khz(); tsc_khz = cpu_khz; - if (!cpu_khz) - goto out_no_tsc; + if (!cpu_khz) { + mark_tsc_unstable("could not calculate TSC khz"); + return; + } printk("Detected %lu.%03lu MHz processor.\n", (unsigned long)cpu_khz / 1000, @@ -431,9 +433,4 @@ void __init tsc_init(void) tsc_enabled = 1; clocksource_register(&clocksource_tsc); - - return; - -out_no_tsc: - setup_clear_cpu_cap(X86_FEATURE_TSC); } -- cgit v1.2.1 From 0d7a1819e97ef89be5bcbb4b724acb9f6c873c97 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 3 Mar 2008 12:49:09 +0100 Subject: x86: wmb() confusion in system.h The comment says wmb() is a nop, but it is implemented as lock addl below... Should it be compiled to a nop if we know we are running on a "good" Intel CPU? At least remove the confusing comment for now. Signed-off-by: Pavel Machek Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 9cff02ffe6c2..428d9471497f 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -296,16 +296,7 @@ void default_idle(void); */ #ifdef CONFIG_X86_32 /* - * For now, "wmb()" doesn't actually do anything, as all - * Intel CPU's follow what Intel calls a *Processor Order*, - * in which all writes are seen in the program order even - * outside the CPU. 
- * - * I expect future Intel CPU's to have a weaker ordering, - * but I'd also expect them to finally get their act together - * and add some real memory barriers if so. - * - * Some non intel clones support out of order store. wmb() ceases to be a + * Some non-Intel clones support out of order store. wmb() ceases to be a * nop for these. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -- cgit v1.2.1 From 2fde61fdb00c2337efc56cfbb05bde8a42864e65 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 4 Mar 2008 19:57:42 +0100 Subject: x86: reserve end-of-conventional-memory to 1MB, 32-bit, use paravirt_enabled Jeremy Fitzhardinge pointed out that looking at the boot_params struct to determine if the system is running in a paravirtual environment is not reliable for the Xen case, currently. He also points out that there already exists a function to determine if the system is running in a paravirtual environment. So let's use that instead. This gets rid of the preprocessor test too. Signed-off-by: Alexander van Heukelum Acked-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_32.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index 14e293edd23f..fd639d9f79b6 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -64,6 +64,7 @@ #include #include #include +#include /* This value is set up by the early boot code to point to the value immediately after the boot time page tables. It contains a *physical* @@ -408,12 +409,8 @@ static void __init reserve_ebda_region(void) /* that area is absent. We'll just have to assume */ /* that the paravirt case can handle memory setup */ /* correctly, without our help. */ -#ifdef CONFIG_PARAVIRT - if ((boot_params.hdr.version >= 0x207) && - (boot_params.hdr.hardware_subarch != 0)) { + if (paravirt_enabled()) return; - } -#endif /* end of low (conventional) memory */ lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); -- cgit v1.2.1 From ecd94c0809eb0ff50b628fa061c531a6fbf2fbbc Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 4 Mar 2008 20:12:28 +0100 Subject: x86: reserve end-of-conventional-memory to 1MB, 64-bit, use paravirt_enabled Jeremy Fitzhardinge pointed out that looking at the boot_params struct to determine if the system is running in a paravirtual environment is not reliable for the Xen case, currently. He also points out that there already exists a function to determine if the system is running in a paravirtual environment. So let's use that instead. This gets rid of the preprocessor test too. Signed-off-by: Alexander van Heukelum Acked-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 269a6b481fe6..48be76cda93b 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -72,12 +72,8 @@ static void __init reserve_ebda_region(void) /* that area is absent. We'll just have to assume */ /* that the paravirt case can handle memory setup */ /* correctly, without our help. 
*/ -#ifdef CONFIG_PARAVIRT - if ((boot_params.hdr.version >= 0x207) && - (boot_params.hdr.hardware_subarch != 0)) { + if (paravirt_enabled()) return; - } -#endif /* end of low (conventional) memory */ lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); -- cgit v1.2.1 From 23b55bd9f33a1812a664e548803db34c9bec56e8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 10:24:37 +0100 Subject: x86: clean up switch_to() Make the code more readable and more hackable: - use symbolic asm parameters - use readable indentation - add comments that explains the details No code changed: kernel/sched.o: text data bss dec hex filename 28626 684 2640 31950 7cce sched.o.before 28626 684 2640 31950 7cce sched.o.after md5: 2823d406c18b781975cdb2e7cfea0059 sched.o.before.asm 2823d406c18b781975cdb2e7cfea0059 sched.o.after.asm Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 428d9471497f..299ae9605cb1 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -27,22 +27,38 @@ struct task_struct *__switch_to(struct task_struct *prev, * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. */ -#define switch_to(prev, next, last) do { \ +#define switch_to(prev, next, last) \ +do { \ unsigned long esi, edi; \ - asm volatile("pushfl\n\t" /* Save flags */ \ - "pushl %%ebp\n\t" \ - "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %5,%%esp\n\t" /* restore ESP */ \ - "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %6\n\t" /* restore EIP */ \ - "jmp __switch_to\n" \ - "1:\t" \ - "popl %%ebp\n\t" \ - "popfl" \ - :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ - "=a" (last), "=S" (esi), "=D" (edi) \ - :"m" (next->thread.sp), "m" (next->thread.ip), \ - "2" (prev), "d" (next)); \ + \ + asm volatile( \ + "pushfl \n\t" /* save flags */ \ + "pushl %%ebp \n\t" /* save EBP */ \ + "movl %%esp,%[prev_sp] \n\t" /* save ESP */ \ + "movl %[next_sp],%%esp \n\t" /* restore ESP */ \ + "movl $1f,%[prev_ip] \n\t" /* save EIP */ \ + "pushl %[next_ip] \n\t" /* restore EIP */ \ + "jmp __switch_to \n" /* regparm call */ \ + "1: \t" \ + "popl %%ebp \n\t" /* restore EBP */ \ + "popfl \n" /* restore flags */ \ + \ + /* output parameters */ \ + : [prev_sp] "=m" (prev->thread.sp), \ + [prev_ip] "=m" (prev->thread.ip), \ + "=a" (last), \ + \ + /* clobbered output registers: */ \ + "=S" (esi), "=D" (edi) \ + \ + /* input parameters: */ \ + : [next_sp] "m" (next->thread.sp), \ + [next_ip] "m" (next->thread.ip), \ + \ + /* regparm parameters for __switch_to(): */ \ + [prev] "a" (prev), \ + [next] "d" (next) \ + ); \ } while (0) /* -- cgit v1.2.1 From 8b6451fe5cf78909f28d3762f77df060c8603cd0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 10:46:38 +0100 Subject: x86: fix switch_to() clobbers Liu Pingfan noticed that switch_to() clobbers more registers than its asm constraints specify. We get away with this due to luck mostly - schedule() by its nature only has 'local' state which gets reloaded automatically. Fix it nevertheless, we could hit this anytime. 
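The pattern used by the fix: registers an asm statement destroys can be declared as otherwise-unused output operands, which tells gcc that their previous contents do not survive the statement. For registers that are not also operands this is equivalent to a clobber list entry; the dummy-output form is what switch_to() already used for ESI/EDI. A minimal sketch of the idea (hypothetical, not the actual switch_to() macro):

/* illustrative only: declare clobbered GPRs via dummy outputs */
static inline void clobber_gprs_example(void)
{
	unsigned long ebx, ecx, edx, esi, edi;

	asm volatile("xorl %%ebx, %%ebx\n\t"
		     "xorl %%ecx, %%ecx\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "xorl %%esi, %%esi\n\t"
		     "xorl %%edi, %%edi"
		     /* dummy outputs: gcc must not keep live values here */
		     : "=b" (ebx), "=c" (ecx), "=d" (edx),
		       "=S" (esi), "=D" (edi));
}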
it turns out that with the extra constraints gcc manages to make schedule() even more compact: text data bss dec hex filename 28626 684 2640 31950 7cce sched.o.before 28613 684 2640 31937 7cc1 sched.o.after Reported-by: Liu Pingfan Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 299ae9605cb1..33b0017156a7 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -29,7 +29,14 @@ struct task_struct *__switch_to(struct task_struct *prev, */ #define switch_to(prev, next, last) \ do { \ - unsigned long esi, edi; \ + /* \ + * Context-switching clobbers all registers, so we clobber \ + * them explicitly, via unused output variables. \ + * (EAX and EBP is not listed because EBP is saved/restored \ + * explicitly for wchan access and EAX is the return value of \ + * __switch_to()) \ + */ \ + unsigned long ebx, ecx, edx, esi, edi; \ \ asm volatile( \ "pushfl \n\t" /* save flags */ \ @@ -49,6 +56,7 @@ do { \ "=a" (last), \ \ /* clobbered output registers: */ \ + "=b" (ebx), "=c" (ecx), "=d" (edx), \ "=S" (esi), "=D" (edi) \ \ /* input parameters: */ \ -- cgit v1.2.1 From c27cfeffad436816ecd500b8dc94acf348182b13 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:29 -0300 Subject: x86: commonize smp.h this is the first step of integrating smp.h between x86_64 and i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f2e8319a6b0b..f250d1c3f8a0 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -1,5 +1,12 @@ +#ifndef _ASM_X86_SMP_H_ +#define _ASM_X86_SMP_H_ +#ifndef __ASSEMBLY__ + #ifdef CONFIG_X86_32 # include "smp_32.h" #else # include "smp_64.h" #endif + +#endif /* __ASSEMBLY__ */ +#endif -- cgit v1.2.1 From 639acb16e6b93342a786c01425cf8eb8ebbb1351 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:30 -0300 Subject: x86: merge extern function definitions move extern function definitions that are the same between smp_{32,64}.h to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 4 ---- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f250d1c3f8a0..ad7b99dda0dc 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -8,5 +8,8 @@ # include "smp_64.h" #endif +extern void smp_alloc_memory(void); +extern void lock_ipi_call_lock(void); +extern void unlock_ipi_call_lock(void); #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 56152e312287..27812258ac6d 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -22,10 +22,6 @@ extern cpumask_t cpu_callin_map; extern int smp_num_siblings; extern unsigned int num_processors; -extern void smp_alloc_memory(void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); - extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index e0a75519ad21..2c21df289da4 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,10 +19,6 @@ extern cpumask_t cpu_initialized; extern int smp_num_siblings; extern unsigned int num_processors; -extern void 
smp_alloc_memory(void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); - extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.1 From 53ebef4961c7d5347b4fa2b878258ccd11fc9663 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:31 -0300 Subject: x86: merge extern variables definitions move extern definitions that are the same between smp_{32,64}.h to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 4 ---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ad7b99dda0dc..c130a87c956d 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -1,6 +1,12 @@ #ifndef _ASM_X86_SMP_H_ #define _ASM_X86_SMP_H_ #ifndef __ASSEMBLY__ +#include + +extern cpumask_t cpu_callout_map; + +extern int smp_num_siblings; +extern unsigned int num_processors; #ifdef CONFIG_X86_32 # include "smp_32.h" diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 27812258ac6d..9a4057d94367 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -16,12 +16,8 @@ # endif #endif -extern cpumask_t cpu_callout_map; extern cpumask_t cpu_callin_map; -extern int smp_num_siblings; -extern unsigned int num_processors; - extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 2c21df289da4..284f701f2a8b 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -13,12 +13,8 @@ #include #include -extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; -extern int smp_num_siblings; -extern unsigned int num_processors; - extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.1 From 16694024d6d6fa84dfcf5400b53afe1e75cebf0d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:32 -0300 Subject: x86: define smp_ops in common header x86_64 will benefit from it Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 14 ++++++++++++++ include/asm-x86/smp_32.h | 14 -------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c130a87c956d..d11b92b56353 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -8,6 +8,20 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +struct smp_ops { + void (*smp_prepare_boot_cpu)(void); + void (*smp_prepare_cpus)(unsigned max_cpus); + int (*cpu_up)(unsigned cpu); + void (*smp_cpus_done)(unsigned max_cpus); + + void (*smp_send_stop)(void); + void (*smp_send_reschedule)(int cpu); + int (*smp_call_function_mask)(cpumask_t mask, + void (*func)(void *info), void *info, + int wait); +}; + + #ifdef CONFIG_X86_32 # include "smp_32.h" #else diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 9a4057d94367..72faad6509c7 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -38,20 +38,6 @@ extern void remove_siblinginfo(int cpu); /* Globals due to paravirt */ extern void set_cpu_sibling_map(int cpu); -struct smp_ops -{ - void (*smp_prepare_boot_cpu)(void); - void (*smp_prepare_cpus)(unsigned max_cpus); - int (*cpu_up)(unsigned cpu); - void (*smp_cpus_done)(unsigned max_cpus); - - void (*smp_send_stop)(void); - void 
(*smp_send_reschedule)(int cpu); - int (*smp_call_function_mask)(cpumask_t mask, - void (*func)(void *info), void *info, - int wait); -}; - #ifdef CONFIG_SMP extern struct smp_ops smp_ops; -- cgit v1.2.1 From c76cb36846da6d5d6fb2951968869faa4fd1001d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:33 -0300 Subject: x86: move smp_ops extern declaration to common header the smp_ops symbol is temporarily defined in smp_64.c, but it will soon be unified Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 2 ++ include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 2 -- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 2fd74b06db67..80dba12b56af 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -528,3 +528,5 @@ asmlinkage void smp_call_function_interrupt(void) } } +struct smp_ops smp_ops; +EXPORT_SYMBOL_GPL(smp_ops); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d11b92b56353..ee98beeb7511 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -21,6 +21,9 @@ struct smp_ops { int wait); }; +#ifdef CONFIG_SMP +extern struct smp_ops smp_ops; +#endif #ifdef CONFIG_X86_32 # include "smp_32.h" diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 72faad6509c7..74755e8ffc77 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,8 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -extern struct smp_ops smp_ops; - static inline void smp_prepare_boot_cpu(void) { smp_ops.smp_prepare_boot_cpu(); -- cgit v1.2.1 From 8678969e60d80527d96d2af0011e72c87c9c1fe5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:34 -0300 Subject: x86: merge smp_send_reschedule function definition is moved to common header, x86_64 version is now called native_smp_send_reschedule Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 7 +++++-- include/asm-x86/smp.h | 5 +++++ include/asm-x86/smp_32.h | 4 ---- include/asm-x86/smp_64.h | 2 -- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 80dba12b56af..fd1816123496 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -290,8 +290,9 @@ void flush_tlb_all(void) * anything. Worst case is that we lose a reschedule ... 
*/ -void smp_send_reschedule(int cpu) +static void native_smp_send_reschedule(int cpu) { + WARN_ON(cpu_is_offline(cpu)); send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } @@ -528,5 +529,7 @@ asmlinkage void smp_call_function_interrupt(void) } } -struct smp_ops smp_ops; +struct smp_ops smp_ops = { + .smp_send_reschedule = native_smp_send_reschedule, +}; EXPORT_SYMBOL_GPL(smp_ops); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ee98beeb7511..28f33c03d793 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -23,6 +23,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; + +static inline void smp_send_reschedule(int cpu) +{ + smp_ops.smp_send_reschedule(cpu); +} #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 74755e8ffc77..c60a3dd3e802 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -60,10 +60,6 @@ static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -static inline void smp_send_reschedule(int cpu) -{ - smp_ops.smp_send_reschedule(cpu); -} static inline int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), void *info, int wait) diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 284f701f2a8b..b9204584aa06 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -65,8 +65,6 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } -extern void smp_send_reschedule(int cpu); - #else /* CONFIG_SMP */ extern unsigned int boot_cpu_id; -- cgit v1.2.1 From 64b1a21e0924dca7ea3b7cf4287fa719c8ba7fc5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:35 -0300 Subject: x86: unify smp_call_function_mask definition is moved to common header, x86_64 function name now is native_smp_call_function_mask Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 7 ++++--- include/asm-x86/smp.h | 7 +++++++ include/asm-x86/smp_32.h | 6 ------ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index fd1816123496..225b765db5a2 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -386,9 +386,9 @@ static int __smp_call_function_mask(cpumask_t mask, * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. 
*/ -int smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +int native_smp_call_function_mask(cpumask_t mask, + void (*func)(void *), void *info, + int wait) { int ret; @@ -531,5 +531,6 @@ asmlinkage void smp_call_function_interrupt(void) struct smp_ops smp_ops = { .smp_send_reschedule = native_smp_send_reschedule, + .smp_call_function_mask = native_smp_call_function_mask, }; EXPORT_SYMBOL_GPL(smp_ops); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 28f33c03d793..d9782f4f469e 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -28,6 +28,13 @@ static inline void smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); } + +static inline int smp_call_function_mask(cpumask_t mask, + void (*func) (void *info), void *info, + int wait) +{ + return smp_ops.smp_call_function_mask(mask, func, info, wait); +} #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index c60a3dd3e802..d9337ee8c2fc 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -60,12 +60,6 @@ static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -static inline int smp_call_function_mask(cpumask_t mask, - void (*func) (void *info), void *info, - int wait) -{ - return smp_ops.smp_call_function_mask(mask, func, info, wait); -} void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); -- cgit v1.2.1 From 71d195492a6e0b22135a7156af1b41c0f99a116b Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:36 -0300 Subject: x86: unify __cpu_up. function definition is moved to common header. x86_64 version is now called native_cpu_up Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 1 + arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/smp.h | 7 +++++++ include/asm-x86/smp_32.h | 5 ----- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 225b765db5a2..7cc20a3c6c19 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -532,5 +532,6 @@ asmlinkage void smp_call_function_interrupt(void) struct smp_ops smp_ops = { .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, + .cpu_up = native_cpu_up, }; EXPORT_SYMBOL_GPL(smp_ops); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 0880f2c388a9..e381fe7792c4 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -929,7 +929,7 @@ void __init smp_prepare_boot_cpu(void) /* * Entry point to boot a CPU. 
*/ -int __cpuinit __cpu_up(unsigned int cpu) +int __cpuinit native_cpu_up(unsigned int cpu) { int apicid = cpu_present_to_apicid(cpu); unsigned long flags; diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d9782f4f469e..7dd71410fe7d 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -24,6 +24,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline int __cpu_up(unsigned int cpu) +{ + return smp_ops.cpu_up(cpu); +} + static inline void smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); @@ -35,6 +40,8 @@ static inline int smp_call_function_mask(cpumask_t mask, { return smp_ops.smp_call_function_mask(mask, func, info, wait); } + +int native_cpu_up(unsigned int cpunum); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index d9337ee8c2fc..a7fab8e1517a 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -47,10 +47,6 @@ static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); } -static inline int __cpu_up(unsigned int cpu) -{ - return smp_ops.cpu_up(cpu); -} static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); @@ -63,7 +59,6 @@ static inline void smp_send_stop(void) void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); -int native_cpu_up(unsigned int cpunum); void native_smp_cpus_done(unsigned int max_cpus); #ifndef CONFIG_PARAVIRT -- cgit v1.2.1 From 1e3fac83da056f26bcb96e13967c157de55bf2ef Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:37 -0300 Subject: x86: unify prepare_boot_cpu definition is moved to common header. x86_64 version is now called native_prepare_boot_cpu Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 1 + arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 5 ----- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 7cc20a3c6c19..05116c1ddb6f 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -530,6 +530,7 @@ asmlinkage void smp_call_function_interrupt(void) } struct smp_ops smp_ops = { + .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, .cpu_up = native_cpu_up, diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index e381fe7792c4..47e654cdc92d 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -918,7 +918,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* * Early setup to make printk work. 
*/ -void __init smp_prepare_boot_cpu(void) +void __init native_smp_prepare_boot_cpu(void) { int me = smp_processor_id(); /* already set me in cpu_online_map in boot_cpu_init() */ diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 7dd71410fe7d..a2d69a1bec24 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -24,6 +24,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline void smp_prepare_boot_cpu(void) +{ + smp_ops.smp_prepare_boot_cpu(); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -41,6 +46,7 @@ static inline int smp_call_function_mask(cpumask_t mask, return smp_ops.smp_call_function_mask(mask, func, info, wait); } +void native_smp_prepare_boot_cpu(void); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index a7fab8e1517a..2c7ecfaa8234 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,10 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_prepare_boot_cpu(void) -{ - smp_ops.smp_prepare_boot_cpu(); -} static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); @@ -57,7 +53,6 @@ static inline void smp_send_stop(void) smp_ops.smp_send_stop(); } -void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); -- cgit v1.2.1 From 7557da67208f6ed3a1073594b7597bf20c9eb63a Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:38 -0300 Subject: x86: unify smp_prepare_cpus definition is moved to common header. x86_64 version is now called native_smp_prepare_cpus Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 1 + arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 5 ----- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 05116c1ddb6f..c520374be1a8 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -531,6 +531,7 @@ asmlinkage void smp_call_function_interrupt(void) struct smp_ops smp_ops = { .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, + .smp_prepare_cpus = native_smp_prepare_cpus, .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, .cpu_up = native_cpu_up, diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 47e654cdc92d..b106983050b0 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -867,7 +867,7 @@ static void __init smp_cpu_index_default(void) * Prepare for SMP bootup. The MP table or ACPI has been read * earlier. Just do some sanity checking here and enable APIC mode. 
*/ -void __init smp_prepare_cpus(unsigned int max_cpus) +void __init native_smp_prepare_cpus(unsigned int max_cpus) { nmi_watchdog_default(); smp_cpu_index_default(); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index a2d69a1bec24..31bd99ddd8c2 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -29,6 +29,11 @@ static inline void smp_prepare_boot_cpu(void) smp_ops.smp_prepare_boot_cpu(); } +static inline void smp_prepare_cpus(unsigned int max_cpus) +{ + smp_ops.smp_prepare_cpus(max_cpus); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -47,6 +52,7 @@ static inline int smp_call_function_mask(cpumask_t mask, } void native_smp_prepare_boot_cpu(void); +void native_smp_prepare_cpus(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 2c7ecfaa8234..50785389680b 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,10 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_prepare_cpus(unsigned int max_cpus) -{ - smp_ops.smp_prepare_cpus(max_cpus); -} static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); @@ -53,7 +49,6 @@ static inline void smp_send_stop(void) smp_ops.smp_send_stop(); } -void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); #ifndef CONFIG_PARAVIRT -- cgit v1.2.1 From c559764923dacef301116a248695856e6eb96e48 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:39 -0300 Subject: x86: unify smp_cpus_done definition is moved to common header. x86_64 version is now called native_smp_cpus_done Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 2 ++ arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 7 ------- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index c520374be1a8..275101ab4b6d 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -532,6 +532,8 @@ asmlinkage void smp_call_function_interrupt(void) struct smp_ops smp_ops = { .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, .smp_prepare_cpus = native_smp_prepare_cpus, + .smp_cpus_done = native_smp_cpus_done, + .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, .cpu_up = native_cpu_up, diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index b106983050b0..fd0d3a93b995 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -987,7 +987,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) /* * Finish the SMP boot. 
*/ -void __init smp_cpus_done(unsigned int max_cpus) +void __init native_smp_cpus_done(unsigned int max_cpus) { smp_cleanup_boot(); setup_ioapic_dest(); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 31bd99ddd8c2..9620165d3b78 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -34,6 +34,11 @@ static inline void smp_prepare_cpus(unsigned int max_cpus) smp_ops.smp_prepare_cpus(max_cpus); } +static inline void smp_cpus_done(unsigned int max_cpus) +{ + smp_ops.smp_cpus_done(max_cpus); +} + static inline int __cpu_up(unsigned int cpu) { return smp_ops.cpu_up(cpu); @@ -53,6 +58,7 @@ static inline int smp_call_function_mask(cpumask_t mask, void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); +void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 50785389680b..bc90a4ed3235 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,18 +39,11 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_cpus_done(unsigned int max_cpus) -{ - smp_ops.smp_cpus_done(max_cpus); -} - static inline void smp_send_stop(void) { smp_ops.smp_send_stop(); } -void native_smp_cpus_done(unsigned int max_cpus); - #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -- cgit v1.2.1 From 93b016f8f393c1f8c27e8c4df06ad1420fac65f5 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:40 -0300 Subject: x86: move disabled_cpus to common header disabled_cpus is (up to now) a x86_64-only contruction. But it's extern declaration can be moved to common header anyway Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_64.h | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 9620165d3b78..be8a511fdb1d 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -2,6 +2,7 @@ #define _ASM_X86_SMP_H_ #ifndef __ASSEMBLY__ #include +#include extern cpumask_t cpu_callout_map; @@ -60,6 +61,8 @@ void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); + +extern unsigned disabled_cpus; #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index b9204584aa06..c7a00caa6ec6 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -44,7 +44,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); -extern unsigned __cpuinitdata disabled_cpus; #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.1 From 7b1292e2371e3ae2ac69fbb899d539ddc7b53a27 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:41 -0300 Subject: x86: use disabled_cpus in i386 this patch allows a cpu to be marked as present but disabled in i386, just as x86_64 currently does. 
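The count matters because it feeds the hotplug sizing rule that the next patch in this series moves to common code: processors the BIOS marked disabled reserve hotplug slots in cpu_possible_map. A hedged, self-contained sketch of that rule (illustrative names only, not the kernel function itself, which appears in full below):

static int example_possible_cpus(int num_processors, int disabled_cpus,
                                 int additional_cpus, int nr_cpus)
{
        int possible;

        /* additional_cpus == -1 means "not set via additional_cpus=NUM" */
        if (additional_cpus == -1)
                additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

        possible = num_processors + additional_cpus;
        if (possible > nr_cpus)
                possible = nr_cpus;     /* clamp to the build-time limit */

        return possible;
}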
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index f349e68e45a0..b2aded3fbfec 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -70,6 +70,8 @@ unsigned int boot_cpu_physical_apicid = -1U; /* Internal processor count */ unsigned int num_processors; +unsigned disabled_cpus __cpuinitdata; + /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; @@ -108,8 +110,10 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) int ver, apicid; physid_mask_t phys_cpu; - if (!(m->mpc_cpuflag & CPU_ENABLED)) + if (!(m->mpc_cpuflag & CPU_ENABLED)) { + disabled_cpus++; return; + } apicid = mpc_apic_id(m, translation_table[mpc_record]); -- cgit v1.2.1 From 68a1c3f8cd893f5c3c1396fec5be7d8acac4fc93 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:42 -0300 Subject: x86: move prefill_possible_map to common file this patch moves prefill_possible_map() to smpboot.c. Right now it is x86_64-specific, but nothing intrinsically prevents it from being used by i386. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/smpboot.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_64.c | 51 ------------------------------------------ include/asm-x86/smp.h | 1 + include/asm-x86/smp_64.h | 1 - 5 files changed, 55 insertions(+), 53 deletions(-) create mode 100644 arch/x86/kernel/smpboot.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index df10327182d4..4c68bfc6df1d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o -obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o +obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o obj-$(CONFIG_X86_32_SMP) += smpcommon_32.o obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c new file mode 100644 index 000000000000..bffe10861390 --- /dev/null +++ b/arch/x86/kernel/smpboot.c @@ -0,0 +1,53 @@ +#include +#include + +#ifdef CONFIG_HOTPLUG_CPU + +int additional_cpus __initdata = -1; + +static __init int setup_additional_cpus(char *s) +{ + return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; +} +early_param("additional_cpus", setup_additional_cpus); + +/* + * cpu_possible_map should be static, it cannot change as cpu's + * are onlined, or offlined. The reason is per-cpu data-structures + * are allocated by some modules at init time, and dont expect to + * do this dynamically on cpu arrival/departure. + * cpu_present_map on the other hand can change dynamically. + * In case when cpu_hotplug is not compiled, then we resort to current + * behaviour, which is cpu_possible == cpu_present. + * - Ashok Raj + * + * Three ways to find out the number of additional hotplug CPUs: + * - If the BIOS specified disabled CPUs in ACPI/mptables use that. + * - The user can overwrite it with additional_cpus=NUM + * - Otherwise don't reserve additional CPUs. + * We do this because additional CPUs waste a lot of memory.
+ * -AK + */ +__init void prefill_possible_map(void) +{ + int i; + int possible; + + if (additional_cpus == -1) { + if (disabled_cpus > 0) + additional_cpus = disabled_cpus; + else + additional_cpus = 0; + } + possible = num_processors + additional_cpus; + if (possible > NR_CPUS) + possible = NR_CPUS; + + printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", + possible, max_t(int, possible - num_processors, 0)); + + for (i = 0; i < possible; i++) + cpu_set(i, cpu_possible_map); +} +#endif + diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index fd0d3a93b995..953b0ff72b65 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -749,51 +749,6 @@ static __init void disable_smp(void) cpu_set(0, per_cpu(cpu_core_map, 0)); } -#ifdef CONFIG_HOTPLUG_CPU - -int additional_cpus __initdata = -1; - -/* - * cpu_possible_map should be static, it cannot change as cpu's - * are onlined, or offlined. The reason is per-cpu data-structures - * are allocated by some modules at init time, and dont expect to - * do this dynamically on cpu arrival/departure. - * cpu_present_map on the other hand can change dynamically. - * In case when cpu_hotplug is not compiled, then we resort to current - * behaviour, which is cpu_possible == cpu_present. - * - Ashok Raj - * - * Three ways to find out the number of additional hotplug CPUs: - * - If the BIOS specified disabled CPUs in ACPI/mptables use that. - * - The user can overwrite it with additional_cpus=NUM - * - Otherwise don't reserve additional CPUs. - * We do this because additional CPUs waste a lot of memory. - * -AK - */ -__init void prefill_possible_map(void) -{ - int i; - int possible; - - if (additional_cpus == -1) { - if (disabled_cpus > 0) - additional_cpus = disabled_cpus; - else - additional_cpus = 0; - } - possible = num_processors + additional_cpus; - if (possible > NR_CPUS) - possible = NR_CPUS; - - printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", - possible, - max_t(int, possible - num_processors, 0)); - - for (i = 0; i < possible; i++) - cpu_set(i, cpu_possible_map); -} -#endif - /* * Various sanity checks. */ @@ -1087,12 +1042,6 @@ void __cpu_die(unsigned int cpu) printk(KERN_ERR "CPU %u didn't die...\n", cpu); } -static __init int setup_additional_cpus(char *s) -{ - return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; -} -early_param("additional_cpus", setup_additional_cpus); - #else /* ... !CONFIG_HOTPLUG_CPU */ int __cpu_disable(void) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index be8a511fdb1d..28cb1f8bb47e 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -63,6 +63,7 @@ void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); extern unsigned disabled_cpus; +extern void prefill_possible_map(void); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index c7a00caa6ec6..e5bc1be70827 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -43,7 +43,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern void prefill_possible_map(void); #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.1 From 7930e53422d3d06ea873199f66c288806d37cc94 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:43 -0300 Subject: x86: remove export for smp_call_function_mask. 
with this removal, exports for both i386 and x86_64, regarding the "smp_call_function" series are now the same. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 275101ab4b6d..a434f6c55f83 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -400,7 +400,6 @@ int native_smp_call_function_mask(cpumask_t mask, spin_unlock(&call_lock); return ret; } -EXPORT_SYMBOL(smp_call_function_mask); /* * smp_call_function_single - Run a function on a specific CPU -- cgit v1.2.1 From 3428f3d6caa3bc2adde050a2771a2821eb46f901 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:44 -0300 Subject: x86: remove irqs disabled warning. there's already a warning in the topmost function Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index a434f6c55f83..b040224927ca 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -420,9 +420,6 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, /* prevent preemption and reschedule on another processor */ int ret, me = get_cpu(); - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - if (cpu == me) { local_irq_disable(); func(info); -- cgit v1.2.1 From e32640a2cd530e1259a06e34a72b0cdb73738ce2 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:45 -0300 Subject: x86: create smpcommon.c This patch creates smpcommon.c with functions that are equal between architectures. The i386-only init_gdt is ifdef'd. Note that smpcommon.o figures twice in the Makefile: this is because sub-architectures like voyager that does not use the normal smp_$(BITS) files also have to access them Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 4 +- arch/x86/kernel/smp_64.c | 56 ---------------------------- arch/x86/kernel/smpcommon.c | 83 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpcommon_32.c | 81 ----------------------------------------- 4 files changed, 85 insertions(+), 139 deletions(-) create mode 100644 arch/x86/kernel/smpcommon.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4c68bfc6df1d..018d04d880db 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -47,8 +47,8 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o -obj-$(CONFIG_X86_32_SMP) += smpcommon_32.o -obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o +obj-$(CONFIG_X86_32_SMP) += smpcommon.o +obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index b040224927ca..1d8b863fa357 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -401,62 +401,6 @@ int native_smp_call_function_mask(cpumask_t mask, return ret; } -/* - * smp_call_function_single - Run a function on a specific CPU - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Currently unused. 
- * @wait: If true, wait until function has completed on other CPUs. - * - * Retrurns 0 on success, else a negative status code. - * - * Does not return until the remote CPU is nearly ready to execute - * or is or has executed. - */ - -int smp_call_function_single (int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - /* prevent preemption and reschedule on another processor */ - int ret, me = get_cpu(); - - if (cpu == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); - - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); - -/* - * smp_call_function - run a function on all other CPUs. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: currently unused. - * @wait: If true, wait (atomically) until function has completed on other - * CPUs. - * - * Returns 0 on success, else a negative status code. Does not return until - * remote CPUs are nearly ready to execute func or are or have executed. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - * Actually there are a few legal cases, like panic. - */ -int smp_call_function (void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - return smp_call_function_mask(cpu_online_map, func, info, wait); -} -EXPORT_SYMBOL(smp_call_function); - static void stop_this_cpu(void *dummy) { local_irq_disable(); diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c new file mode 100644 index 000000000000..3449064d141a --- /dev/null +++ b/arch/x86/kernel/smpcommon.c @@ -0,0 +1,83 @@ +/* + * SMP stuff which is common to all sub-architectures. + */ +#include +#include + +#ifdef CONFIG_X86_32 +DEFINE_PER_CPU(unsigned long, this_cpu_off); +EXPORT_PER_CPU_SYMBOL(this_cpu_off); + +/* Initialize the CPU's GDT. This is either the boot CPU doing itself + (still using the master per-cpu area), or a CPU doing it for a + secondary which will soon come up. */ +__cpuinit void init_gdt(int cpu) +{ + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + + pack_descriptor(&gdt[GDT_ENTRY_PERCPU], + __per_cpu_offset[cpu], 0xFFFFF, + 0x2 | DESCTYPE_S, 0x8); + + gdt[GDT_ENTRY_PERCPU].s = 1; + + per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; + per_cpu(cpu_number, cpu) = cpu; +} +#endif + +/** + * smp_call_function(): Run a function on all other CPUs. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @nonatomic: Unused. + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned; otherwise + * it returns just before the target cpu calls @func. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. + */ +int smp_call_function(void (*func) (void *info), void *info, int nonatomic, + int wait) +{ + return smp_call_function_mask(cpu_online_map, func, info, wait); +} +EXPORT_SYMBOL(smp_call_function); + +/** + * smp_call_function_single - Run a function on a specific CPU + * @cpu: The target CPU. Cannot be the calling CPU. + * @func: The function to run. This must be fast and non-blocking. 
+ * @info: An arbitrary pointer to pass to the function. + * @nonatomic: Unused. + * @wait: If true, wait until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned; otherwise + * it returns just before the target cpu calls @func. + */ +int smp_call_function_single(int cpu, void (*func) (void *info), void *info, + int nonatomic, int wait) +{ + /* prevent preemption and reschedule on another processor */ + int ret; + int me = get_cpu(); + if (cpu == me) { + local_irq_disable(); + func(info); + local_irq_enable(); + put_cpu(); + return 0; + } + + ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); + + put_cpu(); + return ret; +} +EXPORT_SYMBOL(smp_call_function_single); diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c index 8bc38af29aef..8b137891791f 100644 --- a/arch/x86/kernel/smpcommon_32.c +++ b/arch/x86/kernel/smpcommon_32.c @@ -1,82 +1 @@ -/* - * SMP stuff which is common to all sub-architectures. - */ -#include -#include -DEFINE_PER_CPU(unsigned long, this_cpu_off); -EXPORT_PER_CPU_SYMBOL(this_cpu_off); - -/* Initialize the CPU's GDT. This is either the boot CPU doing itself - (still using the master per-cpu area), or a CPU doing it for a - secondary which will soon come up. */ -__cpuinit void init_gdt(int cpu) -{ - struct desc_struct *gdt = get_cpu_gdt_table(cpu); - - pack_descriptor(&gdt[GDT_ENTRY_PERCPU], - __per_cpu_offset[cpu], 0xFFFFF, - 0x2 | DESCTYPE_S, 0x8); - - gdt[GDT_ENTRY_PERCPU].s = 1; - - per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; - per_cpu(cpu_number, cpu) = cpu; -} - - -/** - * smp_call_function(): Run a function on all other CPUs. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - return smp_call_function_mask(cpu_online_map, func, info, wait); -} -EXPORT_SYMBOL(smp_call_function); - -/** - * smp_call_function_single - Run a function on a specific CPU - * @cpu: The target CPU. Cannot be the calling CPU. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. 
- */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - /* prevent preemption and reschedule on another processor */ - int ret; - int me = get_cpu(); - if (cpu == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); - - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); -- cgit v1.2.1 From 3a36d1e435af79ec3bc5ead871e5b22d5558ebf3 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:46 -0300 Subject: x86: provide __smp_call_function This function is used in smp_send_stop(). It's like smp_call_function_mask, but always go to all online cpus, and does not take any locks. It is added to x86_64, but will soon be unified in a common file Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 1d8b863fa357..aa2edb7f3a51 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -322,6 +322,38 @@ void unlock_ipi_call_lock(void) spin_unlock_irq(&call_lock); } +static void __smp_call_function(void (*func) (void *info), void *info, + int nonatomic, int wait) +{ + struct call_data_struct data; + int cpus = num_online_cpus() - 1; + + if (!cpus) + return; + + data.func = func; + data.info = info; + atomic_set(&data.started, 0); + data.wait = wait; + if (wait) + atomic_set(&data.finished, 0); + + call_data = &data; + mb(); + + /* Send a message to all other CPUs and wait for them to respond */ + send_IPI_allbutself(CALL_FUNCTION_VECTOR); + + /* Wait for response */ + while (atomic_read(&data.started) != cpus) + cpu_relax(); + + if (wait) + while (atomic_read(&data.finished) != cpus) + cpu_relax(); +} + + /* * this function sends a 'generic call function' IPI to all other CPU * of the system defined in the mask. @@ -424,7 +456,7 @@ void smp_send_stop(void) /* Don't deadlock on the call lock in panic */ nolock = !spin_trylock(&call_lock); local_irq_save(flags); - __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); + __smp_call_function(stop_this_cpu, NULL, 0, 0); if (!nolock) spin_unlock(&call_lock); disable_local_APIC(); -- cgit v1.2.1 From 2513926c286ca1d0d189c206966011bdd4080354 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:47 -0300 Subject: x86: change x86_64 smp_call_function_mask to look alike i386 the two versions (the inner version, and the outer version, that takes the locks) of smp_call_function_mask are made into one. With the changes, i386 and x86_64 versions look exactly the same. 
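For reference, a hedged usage sketch of the now-unified interface (illustrative code, not part of the patch): run a fast, non-blocking callback on every other online CPU and wait for all of them to finish. The mask must not include the calling CPU, and the caller must not have interrupts disabled:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/atomic.h>

static void bump(void *info)
{
        atomic_inc((atomic_t *)info);   /* fast and non-blocking */
}

static int poke_all_others(atomic_t *counter)
{
        cpumask_t mask = cpu_online_map;
        int ret;

        cpu_clear(get_cpu(), mask);     /* exclude ourselves from the mask */
        ret = smp_call_function_mask(mask, bump, counter, 1);
        put_cpu();

        return ret;
}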
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_32.c | 2 +- arch/x86/kernel/smp_64.c | 57 ++++++++++++++---------------------------------- 2 files changed, 17 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index dc0cde9d16fb..e4a6b669a0b8 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask, atomic_set(&data.finished, 0); call_data = &data; - mb(); + wmb(); /* Send a message to other CPUs */ if (cpus_equal(mask, allbutself)) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index aa2edb7f3a51..e4494e829dfa 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info, } -/* - * this function sends a 'generic call function' IPI to all other CPU - * of the system defined in the mask. - */ -static int __smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +int native_smp_call_function_mask(cpumask_t mask, + void (*func)(void *), void *info, + int wait) { struct call_data_struct data; cpumask_t allbutself; int cpus; + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + /* Holding any lock stops cpus from going down. */ + spin_lock(&call_lock); + allbutself = cpu_online_map; cpu_clear(smp_processor_id(), allbutself); cpus_and(mask, mask, allbutself); cpus = cpus_weight(mask); - if (!cpus) + if (!cpus) { + spin_unlock(&call_lock); return 0; + } data.func = func; data.info = info; @@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask, while (atomic_read(&data.started) != cpus) cpu_relax(); - if (!wait) - return 0; + if (wait) + while (atomic_read(&data.finished) != cpus) + cpu_relax(); - while (atomic_read(&data.finished) != cpus) - cpu_relax(); + spin_unlock(&call_lock); return 0; } -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * @mask: The set of cpus to run on. Must not include the current cpu. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int native_smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) -{ - int ret; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - spin_lock(&call_lock); - ret = __smp_call_function_mask(mask, func, info, wait); - spin_unlock(&call_lock); - return ret; -} static void stop_this_cpu(void *dummy) { -- cgit v1.2.1 From 3d3f487c58ef1ece714af280b29411960908149c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:48 -0300 Subject: x86: provide hlt_works function. In x86_64, hlt always work. 
in i386, we'll query the cpuinfo associated with this cpu Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 9054734589fe..8bec23c15527 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -144,6 +144,15 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); #define current_cpu_data boot_cpu_data #endif +static inline int hlt_works(int cpu) +{ +#ifdef CONFIG_X86_32 + return cpu_data(cpu).hlt_works_ok; +#else + return 1; +#endif +} + #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern void cpu_detect(struct cpuinfo_x86 *c); -- cgit v1.2.1 From 3be5b49e8f1002bc562a2b4670093e4ebf27b4e9 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:49 -0300 Subject: x86: make stop_this_cpu looks exactly equal in both arches with the hlt_works change, it is possible to have i386 and x86_64 stop_this_cpu() looking exactly the same. They can, after that, be merged. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_32.c | 2 +- arch/x86/kernel/smp_64.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index e4a6b669a0b8..cde3a0ecd716 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -611,7 +611,7 @@ static void stop_this_cpu (void * dummy) */ cpu_clear(smp_processor_id(), cpu_online_map); disable_local_APIC(); - if (cpu_data(smp_processor_id()).hlt_works_ok) + if (hlt_works(smp_processor_id())) for(;;) halt(); for (;;); } diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index e4494e829dfa..4e1e2bce969c 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -416,8 +416,9 @@ static void stop_this_cpu(void *dummy) */ cpu_clear(smp_processor_id(), cpu_online_map); disable_local_APIC(); - for (;;) - halt(); + if (hlt_works(smp_processor_id())) + for (;;) halt(); + for (;;); } void smp_send_stop(void) -- cgit v1.2.1 From 321183c145a37e6d31cc55e0f69a226f9006e621 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:50 -0300 Subject: x86: add reboot_force test to native_smp_send_stop This can be safely added to i386. After that, functions look exactly the same for both arches Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_32.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index cde3a0ecd716..8be3e091dcd0 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -24,6 +24,7 @@ #include #include #include +#include /* * Some notes on x86 processor bugs affecting SMP operation: @@ -622,10 +623,14 @@ static void stop_this_cpu (void * dummy) static void native_smp_send_stop(void) { - /* Don't deadlock on the call lock in panic */ - int nolock = !spin_trylock(&call_lock); + int nolock; unsigned long flags; + if (reboot_force) + return; + + /* Don't deadlock on the call lock in panic */ + nolock = !spin_trylock(&call_lock); local_irq_save(flags); __smp_call_function(stop_this_cpu, NULL, 0, 0); if (!nolock) -- cgit v1.2.1 From 377d698426b8c685fb6d48fe89694fe4ce3aa1f8 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:51 -0300 Subject: x86: unify smp_send_stop function definition is moved to common header. 
x86_64 version is now called native_smp_send_stop Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp_64.c | 3 ++- include/asm-x86/smp.h | 5 +++++ include/asm-x86/smp_32.h | 5 ----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 4e1e2bce969c..ad11ef0c3fae 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -421,7 +421,7 @@ static void stop_this_cpu(void *dummy) for (;;); } -void smp_send_stop(void) +void native_smp_send_stop(void) { int nolock; unsigned long flags; @@ -482,6 +482,7 @@ struct smp_ops smp_ops = { .smp_prepare_cpus = native_smp_prepare_cpus, .smp_cpus_done = native_smp_cpus_done, + .smp_send_stop = native_smp_send_stop, .smp_send_reschedule = native_smp_send_reschedule, .smp_call_function_mask = native_smp_call_function_mask, .cpu_up = native_cpu_up, diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 28cb1f8bb47e..2ab8ed4e99e0 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -25,6 +25,11 @@ struct smp_ops { #ifdef CONFIG_SMP extern struct smp_ops smp_ops; +static inline void smp_send_stop(void) +{ + smp_ops.smp_send_stop(); +} + static inline void smp_prepare_boot_cpu(void) { smp_ops.smp_prepare_boot_cpu(); diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index bc90a4ed3235..41b58e0bc75b 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -39,11 +39,6 @@ extern void remove_siblinginfo(int cpu); extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP -static inline void smp_send_stop(void) -{ - smp_ops.smp_send_stop(); -} - #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -- cgit v1.2.1 From f9e47a126be2eaabf04a1a5c71ca7b23a473d0d8 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:52 -0300 Subject: x86: create smp.c this patch moves all the functions and data structures that look like exactly the same from smp_{32,64}.c to smp.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 3 +- arch/x86/kernel/smp.c | 253 +++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smp_32.c | 223 ----------------------------------------- arch/x86/kernel/smp_64.c | 205 -------------------------------------- 4 files changed, 255 insertions(+), 429 deletions(-) create mode 100644 arch/x86/kernel/smp.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 018d04d880db..0a4b088bab5d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -46,7 +46,8 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o -obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o +obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smp.o +obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c new file mode 100644 index 000000000000..b662300a88f3 --- /dev/null +++ b/arch/x86/kernel/smp.c @@ -0,0 +1,253 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef CONFIG_X86_32 +#include +#include +#else +#include +#endif + +/* + * this function sends a 'reschedule' IPI to another CPU. 
+ * it goes straight through and wastes no time serializing + * anything. Worst case is that we lose a reschedule ... + */ +static void native_smp_send_reschedule(int cpu) +{ + WARN_ON(cpu_is_offline(cpu)); + send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); +} + +/* + * Structure and data for smp_call_function(). This is designed to minimise + * static memory requirements. It also looks cleaner. + */ +static DEFINE_SPINLOCK(call_lock); + +struct call_data_struct { + void (*func) (void *info); + void *info; + atomic_t started; + atomic_t finished; + int wait; +}; + +void lock_ipi_call_lock(void) +{ + spin_lock_irq(&call_lock); +} + +void unlock_ipi_call_lock(void) +{ + spin_unlock_irq(&call_lock); +} + +static struct call_data_struct *call_data; + +static void __smp_call_function(void (*func) (void *info), void *info, + int nonatomic, int wait) +{ + struct call_data_struct data; + int cpus = num_online_cpus() - 1; + + if (!cpus) + return; + + data.func = func; + data.info = info; + atomic_set(&data.started, 0); + data.wait = wait; + if (wait) + atomic_set(&data.finished, 0); + + call_data = &data; + mb(); + + /* Send a message to all other CPUs and wait for them to respond */ + send_IPI_allbutself(CALL_FUNCTION_VECTOR); + + /* Wait for response */ + while (atomic_read(&data.started) != cpus) + cpu_relax(); + + if (wait) + while (atomic_read(&data.finished) != cpus) + cpu_relax(); +} + + +/** + * smp_call_function_mask(): Run a function on a set of other CPUs. + * @mask: The set of cpus to run on. Must not include the current cpu. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. + * + * If @wait is true, then returns once @func has returned; otherwise + * it returns just before the target cpu calls @func. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. + */ +static int +native_smp_call_function_mask(cpumask_t mask, + void (*func)(void *), void *info, + int wait) +{ + struct call_data_struct data; + cpumask_t allbutself; + int cpus; + + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + /* Holding any lock stops cpus from going down. */ + spin_lock(&call_lock); + + allbutself = cpu_online_map; + cpu_clear(smp_processor_id(), allbutself); + + cpus_and(mask, mask, allbutself); + cpus = cpus_weight(mask); + + if (!cpus) { + spin_unlock(&call_lock); + return 0; + } + + data.func = func; + data.info = info; + atomic_set(&data.started, 0); + data.wait = wait; + if (wait) + atomic_set(&data.finished, 0); + + call_data = &data; + wmb(); + + /* Send a message to other CPUs */ + if (cpus_equal(mask, allbutself)) + send_IPI_allbutself(CALL_FUNCTION_VECTOR); + else + send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + + /* Wait for response */ + while (atomic_read(&data.started) != cpus) + cpu_relax(); + + if (wait) + while (atomic_read(&data.finished) != cpus) + cpu_relax(); + spin_unlock(&call_lock); + + return 0; +} + +static void stop_this_cpu(void *dummy) +{ + local_irq_disable(); + /* + * Remove this CPU: + */ + cpu_clear(smp_processor_id(), cpu_online_map); + disable_local_APIC(); + if (hlt_works(smp_processor_id())) + for (;;) halt(); + for (;;); +} + +/* + * this function calls the 'stop' function on all other CPUs in the system. 
+ */ + +static void native_smp_send_stop(void) +{ + int nolock; + unsigned long flags; + + if (reboot_force) + return; + + /* Don't deadlock on the call lock in panic */ + nolock = !spin_trylock(&call_lock); + local_irq_save(flags); + __smp_call_function(stop_this_cpu, NULL, 0, 0); + if (!nolock) + spin_unlock(&call_lock); + disable_local_APIC(); + local_irq_restore(flags); +} + +/* + * Reschedule call back. Nothing to do, + * all the work is done automatically when + * we return from the interrupt. + */ +void smp_reschedule_interrupt(struct pt_regs *regs) +{ + ack_APIC_irq(); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_resched_count++; +#else + add_pda(irq_resched_count, 1); +#endif +} + +void smp_call_function_interrupt(struct pt_regs *regs) +{ + void (*func) (void *info) = call_data->func; + void *info = call_data->info; + int wait = call_data->wait; + + ack_APIC_irq(); + /* + * Notify initiating CPU that I've grabbed the data and am + * about to execute the function + */ + mb(); + atomic_inc(&call_data->started); + /* + * At this point the info structure may be out of scope unless wait==1 + */ + irq_enter(); + (*func)(info); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_call_count++; +#else + add_pda(irq_call_count, 1); +#endif + irq_exit(); + + if (wait) { + mb(); + atomic_inc(&call_data->finished); + } +} + +struct smp_ops smp_ops = { + .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, + .smp_prepare_cpus = native_smp_prepare_cpus, + .cpu_up = native_cpu_up, + .smp_cpus_done = native_smp_cpus_done, + + .smp_send_stop = native_smp_send_stop, + .smp_send_reschedule = native_smp_send_reschedule, + .smp_call_function_mask = native_smp_call_function_mask, +}; +EXPORT_SYMBOL_GPL(smp_ops); + diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index 8be3e091dcd0..61e546e85733 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -466,217 +466,6 @@ void flush_tlb_all(void) on_each_cpu(do_flush_tlb_all, NULL, 1, 1); } -/* - * this function sends a 'reschedule' IPI to another CPU. - * it goes straight through and wastes no time serializing - * anything. Worst case is that we lose a reschedule ... - */ -static void native_smp_send_reschedule(int cpu) -{ - WARN_ON(cpu_is_offline(cpu)); - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); -} - -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; - -void lock_ipi_call_lock(void) -{ - spin_lock_irq(&call_lock); -} - -void unlock_ipi_call_lock(void) -{ - spin_unlock_irq(&call_lock); -} - -static struct call_data_struct *call_data; - -static void __smp_call_function(void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - struct call_data_struct data; - int cpus = num_online_cpus() - 1; - - if (!cpus) - return; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); -} - - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. 
- * @mask: The set of cpus to run on. Must not include the current cpu. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -static int -native_smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) -{ - struct call_data_struct data; - cpumask_t allbutself; - int cpus; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - /* Holding any lock stops cpus from going down. */ - spin_lock(&call_lock); - - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - wmb(); - - /* Send a message to other CPUs */ - if (cpus_equal(mask, allbutself)) - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - else - send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - spin_unlock(&call_lock); - - return 0; -} - -static void stop_this_cpu (void * dummy) -{ - local_irq_disable(); - /* - * Remove this CPU: - */ - cpu_clear(smp_processor_id(), cpu_online_map); - disable_local_APIC(); - if (hlt_works(smp_processor_id())) - for(;;) halt(); - for (;;); -} - -/* - * this function calls the 'stop' function on all other CPUs in the system. - */ - -static void native_smp_send_stop(void) -{ - int nolock; - unsigned long flags; - - if (reboot_force) - return; - - /* Don't deadlock on the call lock in panic */ - nolock = !spin_trylock(&call_lock); - local_irq_save(flags); - __smp_call_function(stop_this_cpu, NULL, 0, 0); - if (!nolock) - spin_unlock(&call_lock); - disable_local_APIC(); - local_irq_restore(flags); -} - -/* - * Reschedule call back. Nothing to do, - * all the work is done automatically when - * we return from the interrupt. - */ -void smp_reschedule_interrupt(struct pt_regs *regs) -{ - ack_APIC_irq(); - __get_cpu_var(irq_stat).irq_resched_count++; -} - -void smp_call_function_interrupt(struct pt_regs *regs) -{ - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - ack_APIC_irq(); - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ - irq_enter(); - (*func)(info); - __get_cpu_var(irq_stat).irq_call_count++; - irq_exit(); - - if (wait) { - mb(); - atomic_inc(&call_data->finished); - } -} - static int convert_apicid_to_cpu(int apic_id) { int i; @@ -703,15 +492,3 @@ int safe_smp_processor_id(void) return cpuid >= 0 ? 
cpuid : 0; } - -struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, - .smp_prepare_cpus = native_smp_prepare_cpus, - .cpu_up = native_cpu_up, - .smp_cpus_done = native_smp_cpus_done, - - .smp_send_stop = native_smp_send_stop, - .smp_send_reschedule = native_smp_send_reschedule, - .smp_call_function_mask = native_smp_call_function_mask, -}; -EXPORT_SYMBOL_GPL(smp_ops); diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index ad11ef0c3fae..d28e8685709d 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -283,208 +283,3 @@ void flush_tlb_all(void) { on_each_cpu(do_flush_tlb_all, NULL, 1, 1); } - -/* - * this function sends a 'reschedule' IPI to another CPU. - * it goes straight through and wastes no time serializing - * anything. Worst case is that we lose a reschedule ... - */ - -static void native_smp_send_reschedule(int cpu) -{ - WARN_ON(cpu_is_offline(cpu)); - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); -} - -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; - -static struct call_data_struct * call_data; - -void lock_ipi_call_lock(void) -{ - spin_lock_irq(&call_lock); -} - -void unlock_ipi_call_lock(void) -{ - spin_unlock_irq(&call_lock); -} - -static void __smp_call_function(void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - struct call_data_struct data; - int cpus = num_online_cpus() - 1; - - if (!cpus) - return; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); -} - - -int native_smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) -{ - struct call_data_struct data; - cpumask_t allbutself; - int cpus; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - /* Holding any lock stops cpus from going down. 
*/ - spin_lock(&call_lock); - - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); - - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - wmb(); - - /* Send a message to other CPUs */ - if (cpus_equal(mask, allbutself)) - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - else - send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - - spin_unlock(&call_lock); - - return 0; -} - -static void stop_this_cpu(void *dummy) -{ - local_irq_disable(); - /* - * Remove this CPU: - */ - cpu_clear(smp_processor_id(), cpu_online_map); - disable_local_APIC(); - if (hlt_works(smp_processor_id())) - for (;;) halt(); - for (;;); -} - -void native_smp_send_stop(void) -{ - int nolock; - unsigned long flags; - - if (reboot_force) - return; - - /* Don't deadlock on the call lock in panic */ - nolock = !spin_trylock(&call_lock); - local_irq_save(flags); - __smp_call_function(stop_this_cpu, NULL, 0, 0); - if (!nolock) - spin_unlock(&call_lock); - disable_local_APIC(); - local_irq_restore(flags); -} - -/* - * Reschedule call back. Nothing to do, - * all the work is done automatically when - * we return from the interrupt. - */ -asmlinkage void smp_reschedule_interrupt(void) -{ - ack_APIC_irq(); - add_pda(irq_resched_count, 1); -} - -asmlinkage void smp_call_function_interrupt(void) -{ - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - ack_APIC_irq(); - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ - exit_idle(); - irq_enter(); - (*func)(info); - add_pda(irq_call_count, 1); - irq_exit(); - if (wait) { - mb(); - atomic_inc(&call_data->finished); - } -} - -struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, - .smp_prepare_cpus = native_smp_prepare_cpus, - .smp_cpus_done = native_smp_cpus_done, - - .smp_send_stop = native_smp_send_stop, - .smp_send_reschedule = native_smp_send_reschedule, - .smp_call_function_mask = native_smp_call_function_mask, - .cpu_up = native_cpu_up, -}; -EXPORT_SYMBOL_GPL(smp_ops); -- cgit v1.2.1 From 8202350367ac11d571f6dd4c21c2027a4d235276 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:53 -0300 Subject: x86: create ipi.c This patch moves all ipi and apic related functions from smp_32.c to ipi.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/ipi.c | 178 +++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smp_32.c | 153 ---------------------------------------- 3 files changed, 179 insertions(+), 154 deletions(-) create mode 100644 arch/x86/kernel/ipi.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0a4b088bab5d..e3b01f96c565 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -47,7 +47,7 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o 
+obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c new file mode 100644 index 000000000000..c0df7b89ca23 --- /dev/null +++ b/arch/x86/kernel/ipi.c @@ -0,0 +1,178 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86_32 +#include +/* + * the following functions deal with sending IPIs between CPUs. + * + * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. + */ + +static inline int __prepare_ICR(unsigned int shortcut, int vector) +{ + unsigned int icr = shortcut | APIC_DEST_LOGICAL; + + switch (vector) { + default: + icr |= APIC_DM_FIXED | vector; + break; + case NMI_VECTOR: + icr |= APIC_DM_NMI; + break; + } + return icr; +} + +static inline int __prepare_ICR2(unsigned int mask) +{ + return SET_APIC_DEST_FIELD(mask); +} + +void __send_IPI_shortcut(unsigned int shortcut, int vector) +{ + /* + * Subtle. In the case of the 'never do double writes' workaround + * we have to lock out interrupts to be safe. As we don't care + * of the value read we use an atomic rmw access to avoid costly + * cli/sti. Otherwise we use an even cheaper single atomic write + * to the APIC. + */ + unsigned int cfg; + + /* + * Wait for idle. + */ + apic_wait_icr_idle(); + + /* + * No need to touch the target chip field + */ + cfg = __prepare_ICR(shortcut, vector); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + apic_write_around(APIC_ICR, cfg); +} + +void send_IPI_self(int vector) +{ + __send_IPI_shortcut(APIC_DEST_SELF, vector); +} + +/* + * This is used to send an IPI with no shorthand notation (the destination is + * specified in bits 56 to 63 of the ICR). + */ +static inline void __send_IPI_dest_field(unsigned long mask, int vector) +{ + unsigned long cfg; + + /* + * Wait for idle. + */ + if (unlikely(vector == NMI_VECTOR)) + safe_apic_wait_icr_idle(); + else + apic_wait_icr_idle(); + + /* + * prepare target chip field + */ + cfg = __prepare_ICR2(mask); + apic_write_around(APIC_ICR2, cfg); + + /* + * program the ICR + */ + cfg = __prepare_ICR(0, vector); + + /* + * Send the IPI. The write to APIC_ICR fires this off. + */ + apic_write_around(APIC_ICR, cfg); +} + +/* + * This is only used on smaller machines. + */ +void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) +{ + unsigned long mask = cpus_addr(cpumask)[0]; + unsigned long flags; + + local_irq_save(flags); + WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); + __send_IPI_dest_field(mask, vector); + local_irq_restore(flags); +} + +void send_IPI_mask_sequence(cpumask_t mask, int vector) +{ + unsigned long flags; + unsigned int query_cpu; + + /* + * Hack. The clustered APIC addressing mode doesn't allow us to send + * to an arbitrary mask, so I do a unicasts to each CPU instead. 
This + * should be modified to do 1 message per cluster ID - mbligh + */ + + local_irq_save(flags); + for_each_possible_cpu(query_cpu) { + if (cpu_isset(query_cpu, mask)) { + __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), + vector); + } + } + local_irq_restore(flags); +} + +/* must come after the send_IPI functions above for inlining */ +#include +static int convert_apicid_to_cpu(int apic_id) +{ + int i; + + for_each_possible_cpu(i) { + if (per_cpu(x86_cpu_to_apicid, i) == apic_id) + return i; + } + return -1; +} + +int safe_smp_processor_id(void) +{ + int apicid, cpuid; + + if (!boot_cpu_has(X86_FEATURE_APIC)) + return 0; + + apicid = hard_smp_processor_id(); + if (apicid == BAD_APICID) + return 0; + + cpuid = convert_apicid_to_cpu(apicid); + + return cpuid >= 0 ? cpuid : 0; +} +#endif diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index 61e546e85733..d80623aba9c5 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -107,132 +107,6 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, }; -/* - * the following functions deal with sending IPIs between CPUs. - * - * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. - */ - -static inline int __prepare_ICR (unsigned int shortcut, int vector) -{ - unsigned int icr = shortcut | APIC_DEST_LOGICAL; - - switch (vector) { - default: - icr |= APIC_DM_FIXED | vector; - break; - case NMI_VECTOR: - icr |= APIC_DM_NMI; - break; - } - return icr; -} - -static inline int __prepare_ICR2 (unsigned int mask) -{ - return SET_APIC_DEST_FIELD(mask); -} - -void __send_IPI_shortcut(unsigned int shortcut, int vector) -{ - /* - * Subtle. In the case of the 'never do double writes' workaround - * we have to lock out interrupts to be safe. As we don't care - * of the value read we use an atomic rmw access to avoid costly - * cli/sti. Otherwise we use an even cheaper single atomic write - * to the APIC. - */ - unsigned int cfg; - - /* - * Wait for idle. - */ - apic_wait_icr_idle(); - - /* - * No need to touch the target chip field - */ - cfg = __prepare_ICR(shortcut, vector); - - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - apic_write_around(APIC_ICR, cfg); -} - -void send_IPI_self(int vector) -{ - __send_IPI_shortcut(APIC_DEST_SELF, vector); -} - -/* - * This is used to send an IPI with no shorthand notation (the destination is - * specified in bits 56 to 63 of the ICR). - */ -static inline void __send_IPI_dest_field(unsigned long mask, int vector) -{ - unsigned long cfg; - - /* - * Wait for idle. - */ - if (unlikely(vector == NMI_VECTOR)) - safe_apic_wait_icr_idle(); - else - apic_wait_icr_idle(); - - /* - * prepare target chip field - */ - cfg = __prepare_ICR2(mask); - apic_write_around(APIC_ICR2, cfg); - - /* - * program the ICR - */ - cfg = __prepare_ICR(0, vector); - - /* - * Send the IPI. The write to APIC_ICR fires this off. - */ - apic_write_around(APIC_ICR, cfg); -} - -/* - * This is only used on smaller machines. - */ -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) -{ - unsigned long mask = cpus_addr(cpumask)[0]; - unsigned long flags; - - local_irq_save(flags); - WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); - __send_IPI_dest_field(mask, vector); - local_irq_restore(flags); -} - -void send_IPI_mask_sequence(cpumask_t mask, int vector) -{ - unsigned long flags; - unsigned int query_cpu; - - /* - * Hack. The clustered APIC addressing mode doesn't allow us to send - * to an arbitrary mask, so I do a unicasts to each CPU instead. 
This - * should be modified to do 1 message per cluster ID - mbligh - */ - - local_irq_save(flags); - for_each_possible_cpu(query_cpu) { - if (cpu_isset(query_cpu, mask)) { - __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), - vector); - } - } - local_irq_restore(flags); -} - #include /* must come after the send_IPI functions above for inlining */ /* @@ -465,30 +339,3 @@ void flush_tlb_all(void) { on_each_cpu(do_flush_tlb_all, NULL, 1, 1); } - -static int convert_apicid_to_cpu(int apic_id) -{ - int i; - - for_each_possible_cpu(i) { - if (per_cpu(x86_cpu_to_apicid, i) == apic_id) - return i; - } - return -1; -} - -int safe_smp_processor_id(void) -{ - int apicid, cpuid; - - if (!boot_cpu_has(X86_FEATURE_APIC)) - return 0; - - apicid = hard_smp_processor_id(); - if (apicid == BAD_APICID) - return 0; - - cpuid = convert_apicid_to_cpu(apicid); - - return cpuid >= 0 ? cpuid : 0; -} -- cgit v1.2.1 From c048fdfe6178e082be918d4062c86d9764979112 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:54 -0300 Subject: x86: create tlb files this patch creates tlb_32.c and tlb_64.c, with tlb-related functions that used to live in smp*.c files. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/smp_32.c | 235 ---------------------------------------- arch/x86/kernel/smp_64.c | 275 ----------------------------------------------- arch/x86/kernel/tlb_32.c | 243 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/tlb_64.c | 273 ++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 517 insertions(+), 511 deletions(-) create mode 100644 arch/x86/kernel/tlb_32.c create mode 100644 arch/x86/kernel/tlb_64.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index e3b01f96c565..362ab6a9d5b2 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -47,7 +47,7 @@ obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smp.o -obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o +obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index d80623aba9c5..d8fdec5f19bc 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c @@ -104,238 +104,3 @@ * or are signal timing bugs worked around in hardware and there's * about nothing of note with C stepping upwards. */ - -DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, }; - -#include /* must come after the send_IPI functions above for inlining */ - -/* - * Smarter SMP flushing macros. - * c/o Linus Torvalds. - * - * These mean you can really definitely utterly forget about - * writing to user space from interrupts. (Its not allowed anyway). - * - * Optimizations Manfred Spraul - */ - -static cpumask_t flush_cpumask; -static struct mm_struct * flush_mm; -static unsigned long flush_va; -static DEFINE_SPINLOCK(tlbstate_lock); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. - * - * We need to reload %cr3 since the page tables may be going - * away from under us.. 
- */ -void leave_mm(int cpu) -{ - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} -EXPORT_SYMBOL_GPL(leave_mm); - -/* - * - * The flush IPI assumes that a thread switch happens in this order: - * [cpu0: the cpu that switches] - * 1) switch_mm() either 1a) or 1b) - * 1a) thread switch to a different mm - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. - * 1a2) set cpu_tlbstate to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 - * was in lazy tlb mode. - * 1a3) update cpu_tlbstate[].active_mm - * Now cpu0 accepts tlb flushes for the new mm. - * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); - * Now the other cpus will send tlb flush ipis. - * 1a4) change cr3. - * 1b) thread switch without mm change - * cpu_tlbstate[].active_mm is correct, cpu0 already handles - * flush ipis. - * 1b1) set cpu_tlbstate to TLBSTATE_OK - * 1b2) test_and_set the cpu bit in cpu_vm_mask. - * Atomically set the bit [other cpus will start sending flush ipis], - * and test the bit. - * 1b3) if the bit was 0: leave_mm was called, flush the tlb. - * 2) switch %%esp, ie current - * - * The interrupt must handle 2 special cases: - * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. - * - the cpu performs speculative tlb reads, i.e. even if the cpu only - * runs in kernel space, the cpu could load tlb entries for user space - * pages. - * - * The good news is that cpu_tlbstate is local to each cpu, no - * write/read ordering problems. - */ - -/* - * TLB flush IPI: - * - * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. - * 2) Leave the mm if we are in the lazy tlb mode. - */ - -void smp_invalidate_interrupt(struct pt_regs *regs) -{ - unsigned long cpu; - - cpu = get_cpu(); - - if (!cpu_isset(cpu, flush_cpumask)) - goto out; - /* - * This was a BUG() but until someone can quote me the - * line from the intel manual that guarantees an IPI to - * multiple CPUs is retried _only_ on the erroring CPUs - * its staying as a return - * - * BUG(); - */ - - if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { - if (flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(flush_va); - } else - leave_mm(cpu); - } - ack_APIC_irq(); - smp_mb__before_clear_bit(); - cpu_clear(cpu, flush_cpumask); - smp_mb__after_clear_bit(); -out: - put_cpu_no_resched(); - __get_cpu_var(irq_stat).irq_tlb_count++; -} - -void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, - unsigned long va) -{ - cpumask_t cpumask = *cpumaskp; - - /* - * A couple of (to be removed) sanity checks: - * - * - current CPU must not be in mask - * - mask must exist :) - */ - BUG_ON(cpus_empty(cpumask)); - BUG_ON(cpu_isset(smp_processor_id(), cpumask)); - BUG_ON(!mm); - -#ifdef CONFIG_HOTPLUG_CPU - /* If a CPU which we ran on has gone down, OK. */ - cpus_and(cpumask, cpumask, cpu_online_map); - if (unlikely(cpus_empty(cpumask))) - return; -#endif - - /* - * i'm not happy about this global shared spinlock in the - * MM hot path, but we'll see how contended it is. - * AK: x86-64 has a faster method that could be ported. 
- */ - spin_lock(&tlbstate_lock); - - flush_mm = mm; - flush_va = va; - cpus_or(flush_cpumask, cpumask, flush_cpumask); - /* - * We have to send the IPI only to - * CPUs affected. - */ - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); - - while (!cpus_empty(flush_cpumask)) - /* nothing. lockup detection does not belong here */ - cpu_relax(); - - flush_mm = NULL; - flush_va = 0; - spin_unlock(&tlbstate_lock); -} - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - local_flush_tlb(); - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - preempt_enable(); -} - -void flush_tlb_mm (struct mm_struct * mm) -{ - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - leave_mm(smp_processor_id()); - } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - if (current->active_mm == mm) { - if(current->mm) - __flush_tlb_one(va); - else - leave_mm(smp_processor_id()); - } - - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, va); - - preempt_enable(); -} -EXPORT_SYMBOL(flush_tlb_page); - -static void do_flush_tlb_all(void* info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) - leave_mm(cpu); -} - -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); -} diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index d28e8685709d..26448fff0abd 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c @@ -8,278 +8,3 @@ * This code is released under the GNU General Public License version 2 or * later. */ - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Smarter SMP flushing macros. - * c/o Linus Torvalds. - * - * These mean you can really definitely utterly forget about - * writing to user space from interrupts. (Its not allowed anyway). - * - * Optimizations Manfred Spraul - * - * More scalable flush, from Andi Kleen - * - * To avoid global state use 8 different call vectors. - * Each CPU uses a specific vector to trigger flushes on other - * CPUs. Depending on the received vector the target CPUs look into - * the right per cpu variable for the flush data. - * - * With more than 8 CPUs they are hashed to the 8 available - * vectors. The limited global vector space forces us to this right now. - * In future when interrupts are split into per CPU domains this could be - * fixed, at the cost of triggering multiple IPIs in some cases. - */ - -union smp_flush_state { - struct { - cpumask_t flush_cpumask; - struct mm_struct *flush_mm; - unsigned long flush_va; - spinlock_t tlbstate_lock; - }; - char pad[SMP_CACHE_BYTES]; -} ____cacheline_aligned; - -/* State is put into the per CPU data section, but padded - to a full cache line because other CPUs can access it and we don't - want false sharing in the per cpu data segment. 
*/ -static DEFINE_PER_CPU(union smp_flush_state, flush_state); - -/* - * We cannot call mmdrop() because we are in interrupt context, - * instead update mm->cpu_vm_mask. - */ -void leave_mm(int cpu) -{ - if (read_pda(mmu_state) == TLBSTATE_OK) - BUG(); - cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); - load_cr3(swapper_pg_dir); -} -EXPORT_SYMBOL_GPL(leave_mm); - -/* - * - * The flush IPI assumes that a thread switch happens in this order: - * [cpu0: the cpu that switches] - * 1) switch_mm() either 1a) or 1b) - * 1a) thread switch to a different mm - * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); - * Stop ipi delivery for the old mm. This is not synchronized with - * the other cpus, but smp_invalidate_interrupt ignore flush ipis - * for the wrong mm, and in the worst case we perform a superfluous - * tlb flush. - * 1a2) set cpu mmu_state to TLBSTATE_OK - * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 - * was in lazy tlb mode. - * 1a3) update cpu active_mm - * Now cpu0 accepts tlb flushes for the new mm. - * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); - * Now the other cpus will send tlb flush ipis. - * 1a4) change cr3. - * 1b) thread switch without mm change - * cpu active_mm is correct, cpu0 already handles - * flush ipis. - * 1b1) set cpu mmu_state to TLBSTATE_OK - * 1b2) test_and_set the cpu bit in cpu_vm_mask. - * Atomically set the bit [other cpus will start sending flush ipis], - * and test the bit. - * 1b3) if the bit was 0: leave_mm was called, flush the tlb. - * 2) switch %%esp, ie current - * - * The interrupt must handle 2 special cases: - * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. - * - the cpu performs speculative tlb reads, i.e. even if the cpu only - * runs in kernel space, the cpu could load tlb entries for user space - * pages. - * - * The good news is that cpu mmu_state is local to each cpu, no - * write/read ordering problems. - */ - -/* - * TLB flush IPI: - * - * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. - * 2) Leave the mm if we are in the lazy tlb mode. - * - * Interrupts are disabled. - */ - -asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) -{ - int cpu; - int sender; - union smp_flush_state *f; - - cpu = smp_processor_id(); - /* - * orig_rax contains the negated interrupt vector. - * Use that to determine where the sender put the data. - */ - sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; - f = &per_cpu(flush_state, sender); - - if (!cpu_isset(cpu, f->flush_cpumask)) - goto out; - /* - * This was a BUG() but until someone can quote me the - * line from the intel manual that guarantees an IPI to - * multiple CPUs is retried _only_ on the erroring CPUs - * its staying as a return - * - * BUG(); - */ - - if (f->flush_mm == read_pda(active_mm)) { - if (read_pda(mmu_state) == TLBSTATE_OK) { - if (f->flush_va == TLB_FLUSH_ALL) - local_flush_tlb(); - else - __flush_tlb_one(f->flush_va); - } else - leave_mm(cpu); - } -out: - ack_APIC_irq(); - cpu_clear(cpu, f->flush_cpumask); - add_pda(irq_tlb_count, 1); -} - -void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, - unsigned long va) -{ - int sender; - union smp_flush_state *f; - cpumask_t cpumask = *cpumaskp; - - /* Caller has disabled preemption */ - sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; - f = &per_cpu(flush_state, sender); - - /* - * Could avoid this lock when - * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is - * probably not worth checking this for a cache-hot lock. 
- */ - spin_lock(&f->tlbstate_lock); - - f->flush_mm = mm; - f->flush_va = va; - cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); - - /* - * We have to send the IPI only to - * CPUs affected. - */ - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); - - while (!cpus_empty(f->flush_cpumask)) - cpu_relax(); - - f->flush_mm = NULL; - f->flush_va = 0; - spin_unlock(&f->tlbstate_lock); -} - -int __cpuinit init_smp_flush(void) -{ - int i; - - for_each_cpu_mask(i, cpu_possible_map) { - spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); - } - return 0; -} -core_initcall(init_smp_flush); - -void flush_tlb_current_task(void) -{ - struct mm_struct *mm = current->mm; - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - local_flush_tlb(); - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - preempt_enable(); -} - -void flush_tlb_mm (struct mm_struct * mm) -{ - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - if (current->active_mm == mm) { - if (current->mm) - local_flush_tlb(); - else - leave_mm(smp_processor_id()); - } - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); - - preempt_enable(); -} - -void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) -{ - struct mm_struct *mm = vma->vm_mm; - cpumask_t cpu_mask; - - preempt_disable(); - cpu_mask = mm->cpu_vm_mask; - cpu_clear(smp_processor_id(), cpu_mask); - - if (current->active_mm == mm) { - if(current->mm) - __flush_tlb_one(va); - else - leave_mm(smp_processor_id()); - } - - if (!cpus_empty(cpu_mask)) - flush_tlb_others(cpu_mask, mm, va); - - preempt_enable(); -} - -static void do_flush_tlb_all(void* info) -{ - unsigned long cpu = smp_processor_id(); - - __flush_tlb_all(); - if (read_pda(mmu_state) == TLBSTATE_LAZY) - leave_mm(cpu); -} - -void flush_tlb_all(void) -{ - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); -} diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c new file mode 100644 index 000000000000..9bb2363851af --- /dev/null +++ b/arch/x86/kernel/tlb_32.c @@ -0,0 +1,243 @@ +#include +#include +#include + +#include + +DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) + ____cacheline_aligned = { &init_mm, 0, }; + +/* must come after the send_IPI functions above for inlining */ +#include + +/* + * Smarter SMP flushing macros. + * c/o Linus Torvalds. + * + * These mean you can really definitely utterly forget about + * writing to user space from interrupts. (Its not allowed anyway). + * + * Optimizations Manfred Spraul + */ + +static cpumask_t flush_cpumask; +static struct mm_struct *flush_mm; +static unsigned long flush_va; +static DEFINE_SPINLOCK(tlbstate_lock); + +/* + * We cannot call mmdrop() because we are in interrupt context, + * instead update mm->cpu_vm_mask. + * + * We need to reload %cr3 since the page tables may be going + * away from under us.. + */ +void leave_mm(int cpu) +{ + if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) + BUG(); + cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); + load_cr3(swapper_pg_dir); +} +EXPORT_SYMBOL_GPL(leave_mm); + +/* + * + * The flush IPI assumes that a thread switch happens in this order: + * [cpu0: the cpu that switches] + * 1) switch_mm() either 1a) or 1b) + * 1a) thread switch to a different mm + * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); + * Stop ipi delivery for the old mm. 
This is not synchronized with + * the other cpus, but smp_invalidate_interrupt ignore flush ipis + * for the wrong mm, and in the worst case we perform a superfluous + * tlb flush. + * 1a2) set cpu_tlbstate to TLBSTATE_OK + * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 + * was in lazy tlb mode. + * 1a3) update cpu_tlbstate[].active_mm + * Now cpu0 accepts tlb flushes for the new mm. + * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); + * Now the other cpus will send tlb flush ipis. + * 1a4) change cr3. + * 1b) thread switch without mm change + * cpu_tlbstate[].active_mm is correct, cpu0 already handles + * flush ipis. + * 1b1) set cpu_tlbstate to TLBSTATE_OK + * 1b2) test_and_set the cpu bit in cpu_vm_mask. + * Atomically set the bit [other cpus will start sending flush ipis], + * and test the bit. + * 1b3) if the bit was 0: leave_mm was called, flush the tlb. + * 2) switch %%esp, ie current + * + * The interrupt must handle 2 special cases: + * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. + * - the cpu performs speculative tlb reads, i.e. even if the cpu only + * runs in kernel space, the cpu could load tlb entries for user space + * pages. + * + * The good news is that cpu_tlbstate is local to each cpu, no + * write/read ordering problems. + */ + +/* + * TLB flush IPI: + * + * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. + * 2) Leave the mm if we are in the lazy tlb mode. + */ + +void smp_invalidate_interrupt(struct pt_regs *regs) +{ + unsigned long cpu; + + cpu = get_cpu(); + + if (!cpu_isset(cpu, flush_cpumask)) + goto out; + /* + * This was a BUG() but until someone can quote me the + * line from the intel manual that guarantees an IPI to + * multiple CPUs is retried _only_ on the erroring CPUs + * its staying as a return + * + * BUG(); + */ + + if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { + if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { + if (flush_va == TLB_FLUSH_ALL) + local_flush_tlb(); + else + __flush_tlb_one(flush_va); + } else + leave_mm(cpu); + } + ack_APIC_irq(); + smp_mb__before_clear_bit(); + cpu_clear(cpu, flush_cpumask); + smp_mb__after_clear_bit(); +out: + put_cpu_no_resched(); + __get_cpu_var(irq_stat).irq_tlb_count++; +} + +void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, + unsigned long va) +{ + cpumask_t cpumask = *cpumaskp; + + /* + * A couple of (to be removed) sanity checks: + * + * - current CPU must not be in mask + * - mask must exist :) + */ + BUG_ON(cpus_empty(cpumask)); + BUG_ON(cpu_isset(smp_processor_id(), cpumask)); + BUG_ON(!mm); + +#ifdef CONFIG_HOTPLUG_CPU + /* If a CPU which we ran on has gone down, OK. */ + cpus_and(cpumask, cpumask, cpu_online_map); + if (unlikely(cpus_empty(cpumask))) + return; +#endif + + /* + * i'm not happy about this global shared spinlock in the + * MM hot path, but we'll see how contended it is. + * AK: x86-64 has a faster method that could be ported. + */ + spin_lock(&tlbstate_lock); + + flush_mm = mm; + flush_va = va; + cpus_or(flush_cpumask, cpumask, flush_cpumask); + /* + * We have to send the IPI only to + * CPUs affected. + */ + send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); + + while (!cpus_empty(flush_cpumask)) + /* nothing. 
lockup detection does not belong here */ + cpu_relax(); + + flush_mm = NULL; + flush_va = 0; + spin_unlock(&tlbstate_lock); +} + +void flush_tlb_current_task(void) +{ + struct mm_struct *mm = current->mm; + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + local_flush_tlb(); + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + preempt_enable(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + if (current->active_mm == mm) { + if (current->mm) + local_flush_tlb(); + else + leave_mm(smp_processor_id()); + } + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + + preempt_enable(); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) +{ + struct mm_struct *mm = vma->vm_mm; + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + if (current->active_mm == mm) { + if (current->mm) + __flush_tlb_one(va); + else + leave_mm(smp_processor_id()); + } + + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, va); + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_page); + +static void do_flush_tlb_all(void *info) +{ + unsigned long cpu = smp_processor_id(); + + __flush_tlb_all(); + if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) + leave_mm(cpu); +} + +void flush_tlb_all(void) +{ + on_each_cpu(do_flush_tlb_all, NULL, 1, 1); +} + diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c new file mode 100644 index 000000000000..615d84817758 --- /dev/null +++ b/arch/x86/kernel/tlb_64.c @@ -0,0 +1,273 @@ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +/* + * Smarter SMP flushing macros. + * c/o Linus Torvalds. + * + * These mean you can really definitely utterly forget about + * writing to user space from interrupts. (Its not allowed anyway). + * + * Optimizations Manfred Spraul + * + * More scalable flush, from Andi Kleen + * + * To avoid global state use 8 different call vectors. + * Each CPU uses a specific vector to trigger flushes on other + * CPUs. Depending on the received vector the target CPUs look into + * the right per cpu variable for the flush data. + * + * With more than 8 CPUs they are hashed to the 8 available + * vectors. The limited global vector space forces us to this right now. + * In future when interrupts are split into per CPU domains this could be + * fixed, at the cost of triggering multiple IPIs in some cases. + */ + +union smp_flush_state { + struct { + cpumask_t flush_cpumask; + struct mm_struct *flush_mm; + unsigned long flush_va; + spinlock_t tlbstate_lock; + }; + char pad[SMP_CACHE_BYTES]; +} ____cacheline_aligned; + +/* State is put into the per CPU data section, but padded + to a full cache line because other CPUs can access it and we don't + want false sharing in the per cpu data segment. */ +static DEFINE_PER_CPU(union smp_flush_state, flush_state); + +/* + * We cannot call mmdrop() because we are in interrupt context, + * instead update mm->cpu_vm_mask. 
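+ * (loading swapper_pg_dir below keeps this CPU on valid page tables
+ * once the departing mm's pagetables go away; the 32-bit twin in
+ * tlb_32.c spells this out in its comment)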
+ */ +void leave_mm(int cpu) +{ + if (read_pda(mmu_state) == TLBSTATE_OK) + BUG(); + cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); + load_cr3(swapper_pg_dir); +} +EXPORT_SYMBOL_GPL(leave_mm); + +/* + * + * The flush IPI assumes that a thread switch happens in this order: + * [cpu0: the cpu that switches] + * 1) switch_mm() either 1a) or 1b) + * 1a) thread switch to a different mm + * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); + * Stop ipi delivery for the old mm. This is not synchronized with + * the other cpus, but smp_invalidate_interrupt ignore flush ipis + * for the wrong mm, and in the worst case we perform a superfluous + * tlb flush. + * 1a2) set cpu mmu_state to TLBSTATE_OK + * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 + * was in lazy tlb mode. + * 1a3) update cpu active_mm + * Now cpu0 accepts tlb flushes for the new mm. + * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); + * Now the other cpus will send tlb flush ipis. + * 1a4) change cr3. + * 1b) thread switch without mm change + * cpu active_mm is correct, cpu0 already handles + * flush ipis. + * 1b1) set cpu mmu_state to TLBSTATE_OK + * 1b2) test_and_set the cpu bit in cpu_vm_mask. + * Atomically set the bit [other cpus will start sending flush ipis], + * and test the bit. + * 1b3) if the bit was 0: leave_mm was called, flush the tlb. + * 2) switch %%esp, ie current + * + * The interrupt must handle 2 special cases: + * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. + * - the cpu performs speculative tlb reads, i.e. even if the cpu only + * runs in kernel space, the cpu could load tlb entries for user space + * pages. + * + * The good news is that cpu mmu_state is local to each cpu, no + * write/read ordering problems. + */ + +/* + * TLB flush IPI: + * + * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. + * 2) Leave the mm if we are in the lazy tlb mode. + * + * Interrupts are disabled. + */ + +asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs) +{ + int cpu; + int sender; + union smp_flush_state *f; + + cpu = smp_processor_id(); + /* + * orig_rax contains the negated interrupt vector. + * Use that to determine where the sender put the data. + */ + sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; + f = &per_cpu(flush_state, sender); + + if (!cpu_isset(cpu, f->flush_cpumask)) + goto out; + /* + * This was a BUG() but until someone can quote me the + * line from the intel manual that guarantees an IPI to + * multiple CPUs is retried _only_ on the erroring CPUs + * its staying as a return + * + * BUG(); + */ + + if (f->flush_mm == read_pda(active_mm)) { + if (read_pda(mmu_state) == TLBSTATE_OK) { + if (f->flush_va == TLB_FLUSH_ALL) + local_flush_tlb(); + else + __flush_tlb_one(f->flush_va); + } else + leave_mm(cpu); + } +out: + ack_APIC_irq(); + cpu_clear(cpu, f->flush_cpumask); + add_pda(irq_tlb_count, 1); +} + +void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, + unsigned long va) +{ + int sender; + union smp_flush_state *f; + cpumask_t cpumask = *cpumaskp; + + /* Caller has disabled preemption */ + sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; + f = &per_cpu(flush_state, sender); + + /* + * Could avoid this lock when + * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is + * probably not worth checking this for a cache-hot lock. 
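+ * (the sender index computed above hashes this CPU onto one of the
+ * NUM_INVALIDATE_TLB_VECTORS per-vector flush slots; CPUs that hash
+ * to the same slot serialize on this per-slot lock)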
+ */ + spin_lock(&f->tlbstate_lock); + + f->flush_mm = mm; + f->flush_va = va; + cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); + + /* + * We have to send the IPI only to + * CPUs affected. + */ + send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); + + while (!cpus_empty(f->flush_cpumask)) + cpu_relax(); + + f->flush_mm = NULL; + f->flush_va = 0; + spin_unlock(&f->tlbstate_lock); +} + +int __cpuinit init_smp_flush(void) +{ + int i; + + for_each_cpu_mask(i, cpu_possible_map) { + spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock); + } + return 0; +} +core_initcall(init_smp_flush); + +void flush_tlb_current_task(void) +{ + struct mm_struct *mm = current->mm; + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + local_flush_tlb(); + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + preempt_enable(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + if (current->active_mm == mm) { + if (current->mm) + local_flush_tlb(); + else + leave_mm(smp_processor_id()); + } + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); + + preempt_enable(); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) +{ + struct mm_struct *mm = vma->vm_mm; + cpumask_t cpu_mask; + + preempt_disable(); + cpu_mask = mm->cpu_vm_mask; + cpu_clear(smp_processor_id(), cpu_mask); + + if (current->active_mm == mm) { + if (current->mm) + __flush_tlb_one(va); + else + leave_mm(smp_processor_id()); + } + + if (!cpus_empty(cpu_mask)) + flush_tlb_others(cpu_mask, mm, va); + + preempt_enable(); +} + +static void do_flush_tlb_all(void *info) +{ + unsigned long cpu = smp_processor_id(); + + __flush_tlb_all(); + if (read_pda(mmu_state) == TLBSTATE_LAZY) + leave_mm(cpu); +} + +void flush_tlb_all(void) +{ + on_each_cpu(do_flush_tlb_all, NULL, 1, 1); +} -- cgit v1.2.1 From 0941ecb55fbfd2d8bcc62dfd2fcaba1b35f2f196 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:55 -0300 Subject: x86: get rid of smp_32.c and smp_64.c This patch merges the copyright notices, and valuable comments that were left back on smp_{32,64}.c. 
With that, files are empty, and are deleted Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 4 +- arch/x86/kernel/smp.c | 91 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smp_32.c | 106 ----------------------------------------------- arch/x86/kernel/smp_64.c | 10 ----- 4 files changed, 93 insertions(+), 118 deletions(-) delete mode 100644 arch/x86/kernel/smp_32.c delete mode 100644 arch/x86/kernel/smp_64.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 362ab6a9d5b2..c436e747f502 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -46,10 +46,10 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o -obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smp.o +obj-$(CONFIG_X86_SMP) += smpboot_$(BITS).o smp.o obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o obj-$(CONFIG_X86_32_SMP) += smpcommon.o -obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o +obj-$(CONFIG_X86_64_SMP) += smpboot_64.o tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index b662300a88f3..88c1e518a203 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -1,3 +1,16 @@ +/* + * Intel SMP support routines. + * + * (c) 1995 Alan Cox, Building #3 + * (c) 1998-99, 2000 Ingo Molnar + * (c) 2002,2003 Andi Kleen, SuSE Labs. + * + * i386 and x86_64 integration by Glauber Costa + * + * This code is released under the GNU General Public License version 2 or + * later. + */ + #include #include @@ -19,6 +32,84 @@ #else #include #endif +/* + * Some notes on x86 processor bugs affecting SMP operation: + * + * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. + * The Linux implications for SMP are handled as follows: + * + * Pentium III / [Xeon] + * None of the E1AP-E3AP errata are visible to the user. + * + * E1AP. see PII A1AP + * E2AP. see PII A2AP + * E3AP. see PII A3AP + * + * Pentium II / [Xeon] + * None of the A1AP-A3AP errata are visible to the user. + * + * A1AP. see PPro 1AP + * A2AP. see PPro 2AP + * A3AP. see PPro 7AP + * + * Pentium Pro + * None of 1AP-9AP errata are visible to the normal user, + * except occasional delivery of 'spurious interrupt' as trap #15. + * This is very rare and a non-problem. + * + * 1AP. Linux maps APIC as non-cacheable + * 2AP. worked around in hardware + * 3AP. fixed in C0 and above steppings microcode update. + * Linux does not use excessive STARTUP_IPIs. + * 4AP. worked around in hardware + * 5AP. symmetric IO mode (normal Linux operation) not affected. + * 'noapic' mode has vector 0xf filled out properly. + * 6AP. 'noapic' mode might be affected - fixed in later steppings + * 7AP. We do not assume writes to the LVT deassering IRQs + * 8AP. We do not enable low power mode (deep sleep) during MP bootup + * 9AP. We do not use mixed mode + * + * Pentium + * There is a marginal case where REP MOVS on 100MHz SMP + * machines with B stepping processors can fail. XXX should provide + * an L1cache=Writethrough or L1cache=off option. + * + * B stepping CPUs may hang. There are hardware work arounds + * for this. We warn about it in case your board doesn't have the work + * arounds. Basically that's so I can tell anyone with a B stepping + * CPU and SMP problems "tough". 
+ * + * Specific items [From Pentium Processor Specification Update] + * + * 1AP. Linux doesn't use remote read + * 2AP. Linux doesn't trust APIC errors + * 3AP. We work around this + * 4AP. Linux never generated 3 interrupts of the same priority + * to cause a lost local interrupt. + * 5AP. Remote read is never used + * 6AP. not affected - worked around in hardware + * 7AP. not affected - worked around in hardware + * 8AP. worked around in hardware - we get explicit CS errors if not + * 9AP. only 'noapic' mode affected. Might generate spurious + * interrupts, we log only the first one and count the + * rest silently. + * 10AP. not affected - worked around in hardware + * 11AP. Linux reads the APIC between writes to avoid this, as per + * the documentation. Make sure you preserve this as it affects + * the C stepping chips too. + * 12AP. not affected - worked around in hardware + * 13AP. not affected - worked around in hardware + * 14AP. we always deassert INIT during bootup + * 15AP. not affected - worked around in hardware + * 16AP. not affected - worked around in hardware + * 17AP. not affected - worked around in hardware + * 18AP. not affected - worked around in hardware + * 19AP. not affected - worked around in BIOS + * + * If this sounds worrying believe me these bugs are either ___RARE___, + * or are signal timing bugs worked around in hardware and there's + * about nothing of note with C stepping upwards. + */ /* * this function sends a 'reschedule' IPI to another CPU. diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c deleted file mode 100644 index d8fdec5f19bc..000000000000 --- a/arch/x86/kernel/smp_32.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Intel SMP support routines. - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998-99, 2000 Ingo Molnar - * - * This code is released under the GNU General Public License version 2 or - * later. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * Some notes on x86 processor bugs affecting SMP operation: - * - * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. - * The Linux implications for SMP are handled as follows: - * - * Pentium III / [Xeon] - * None of the E1AP-E3AP errata are visible to the user. - * - * E1AP. see PII A1AP - * E2AP. see PII A2AP - * E3AP. see PII A3AP - * - * Pentium II / [Xeon] - * None of the A1AP-A3AP errata are visible to the user. - * - * A1AP. see PPro 1AP - * A2AP. see PPro 2AP - * A3AP. see PPro 7AP - * - * Pentium Pro - * None of 1AP-9AP errata are visible to the normal user, - * except occasional delivery of 'spurious interrupt' as trap #15. - * This is very rare and a non-problem. - * - * 1AP. Linux maps APIC as non-cacheable - * 2AP. worked around in hardware - * 3AP. fixed in C0 and above steppings microcode update. - * Linux does not use excessive STARTUP_IPIs. - * 4AP. worked around in hardware - * 5AP. symmetric IO mode (normal Linux operation) not affected. - * 'noapic' mode has vector 0xf filled out properly. - * 6AP. 'noapic' mode might be affected - fixed in later steppings - * 7AP. We do not assume writes to the LVT deassering IRQs - * 8AP. We do not enable low power mode (deep sleep) during MP bootup - * 9AP. We do not use mixed mode - * - * Pentium - * There is a marginal case where REP MOVS on 100MHz SMP - * machines with B stepping processors can fail. XXX should provide - * an L1cache=Writethrough or L1cache=off option. - * - * B stepping CPUs may hang. 
There are hardware work arounds - * for this. We warn about it in case your board doesn't have the work - * arounds. Basically that's so I can tell anyone with a B stepping - * CPU and SMP problems "tough". - * - * Specific items [From Pentium Processor Specification Update] - * - * 1AP. Linux doesn't use remote read - * 2AP. Linux doesn't trust APIC errors - * 3AP. We work around this - * 4AP. Linux never generated 3 interrupts of the same priority - * to cause a lost local interrupt. - * 5AP. Remote read is never used - * 6AP. not affected - worked around in hardware - * 7AP. not affected - worked around in hardware - * 8AP. worked around in hardware - we get explicit CS errors if not - * 9AP. only 'noapic' mode affected. Might generate spurious - * interrupts, we log only the first one and count the - * rest silently. - * 10AP. not affected - worked around in hardware - * 11AP. Linux reads the APIC between writes to avoid this, as per - * the documentation. Make sure you preserve this as it affects - * the C stepping chips too. - * 12AP. not affected - worked around in hardware - * 13AP. not affected - worked around in hardware - * 14AP. we always deassert INIT during bootup - * 15AP. not affected - worked around in hardware - * 16AP. not affected - worked around in hardware - * 17AP. not affected - worked around in hardware - * 18AP. not affected - worked around in hardware - * 19AP. not affected - worked around in BIOS - * - * If this sounds worrying believe me these bugs are either ___RARE___, - * or are signal timing bugs worked around in hardware and there's - * about nothing of note with C stepping upwards. - */ diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c deleted file mode 100644 index 26448fff0abd..000000000000 --- a/arch/x86/kernel/smp_64.c +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Intel SMP support routines. - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998-99, 2000 Ingo Molnar - * (c) 2002,2003 Andi Kleen, SuSE Labs. - * - * This code is released under the GNU General Public License version 2 or - * later. 
- */ -- cgit v1.2.1 From fe6762030ca3728d3e24b556676114a6a64a97be Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:56 -0300 Subject: x86: remove cpu_llc_id from processor.h it is already defined in smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 8bec23c15527..e49e5e69ebb0 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -355,9 +355,7 @@ union i387_union { struct i387_soft_struct soft; }; -#ifdef CONFIG_X86_32 -DECLARE_PER_CPU(u8, cpu_llc_id); -#else +#ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); #endif -- cgit v1.2.1 From 5382e89670399f9db8a58b3c6f850fa4a94f6cca Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:57 -0300 Subject: x86: adjust types in smpcommon_32.c so they can have the same type as x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 6 +++--- include/asm-x86/smp_32.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 579b9b740c7c..5a446f079b33 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -67,7 +67,7 @@ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID; +DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); @@ -92,10 +92,10 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); /* which logical CPU number maps to which CPU (physical APIC ID) */ -u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata = +u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_cpu_to_apicid_early_ptr; -DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; +DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u8 apicid_2_node[MAX_APICID]; diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 41b58e0bc75b..29f61952ea8c 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -21,13 +21,13 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); -extern u8 __initdata x86_cpu_to_apicid_init[]; +extern u16 __initdata x86_cpu_to_apicid_init[]; extern void *x86_cpu_to_apicid_early_ptr; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u8, cpu_llc_id); -DECLARE_PER_CPU(u8, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); -- cgit v1.2.1 From a355352b97901d987f54ea7c7d7161eb51a3799c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:58 -0300 Subject: x86: move equal types to common file move definitions that are now equal in type from smpboot_{32,64}.c to smpboot.c cpu_callin_map is put temporarily in smp_64.h (already exists in smp_32.h), and will soon be merged. 
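The move is only safe because the preceding patch widened the 32-bit
per-cpu types to match x86_64, so a single definition can now serve both
builds. A user-space sketch of why the extra width matters (illustrative
only -- the names and values here are hypothetical, not kernel code):

#include <stdio.h>

typedef unsigned char  u8;
typedef unsigned short u16;

/* sentinels in the style of BAD_APICID */
#define BAD_APICID_U8  ((u8)0xFFu)
#define BAD_APICID_U16 ((u16)0xFFFFu)

int main(void)
{
	unsigned int apicid = 0xFF;	/* a legal physical APIC ID on large boxes */
	u8  narrow = (u8)apicid;	/* collides with the u8 sentinel */
	u16 wide   = (u16)apicid;	/* still distinct from the u16 sentinel */

	printf("u8  bookkeeping: %s\n", narrow == BAD_APICID_U8  ? "ID lost" : "ok");
	printf("u16 bookkeeping: %s\n", wide   == BAD_APICID_U16 ? "ID lost" : "ok");
	return 0;
}

With u16 everywhere, the one DEFINE_PER_CPU(u16, cpu_llc_id) in the
common file can pair with matching DECLARE_PER_CPU(u16, cpu_llc_id)
declarations in the headers for both builds.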
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 28 ++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 27 --------------------------- arch/x86/kernel/smpboot_64.c | 33 --------------------------------- include/asm-x86/smp_64.h | 1 + 4 files changed, 29 insertions(+), 60 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index bffe10861390..40a3b56952ef 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1,6 +1,34 @@ #include #include +#include +/* Number of siblings per CPU package */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* Last level cache ID of each logical CPU */ +DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; + +/* bitmap of online cpus */ +cpumask_t cpu_online_map __read_mostly; +EXPORT_SYMBOL(cpu_online_map); + +cpumask_t cpu_callin_map; +cpumask_t cpu_callout_map; +cpumask_t cpu_possible_map; +EXPORT_SYMBOL(cpu_possible_map); + +/* representing HT siblings of each logical CPU */ +DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); +EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); + +/* representing HT and core siblings of each logical CPU */ +DEFINE_PER_CPU(cpumask_t, cpu_core_map); +EXPORT_PER_CPU_SYMBOL(cpu_core_map); + +/* Per CPU bogomips and other parameters */ +DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); +EXPORT_PER_CPU_SYMBOL(cpu_info); #ifdef CONFIG_HOTPLUG_CPU int additional_cpus __initdata = -1; diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5a446f079b33..0fbc98163b4e 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -62,35 +62,8 @@ /* Set if we find a B stepping CPU */ static int __cpuinitdata smp_b_stepping; -/* Number of siblings per CPU package */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - -/* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; - -/* representing HT siblings of each logical CPU */ -DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); -EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); - -/* representing HT and core siblings of each logical CPU */ -DEFINE_PER_CPU(cpumask_t, cpu_core_map); -EXPORT_PER_CPU_SYMBOL(cpu_core_map); - -/* bitmap of online cpus */ -cpumask_t cpu_online_map __read_mostly; -EXPORT_SYMBOL(cpu_online_map); - -cpumask_t cpu_callin_map; -cpumask_t cpu_callout_map; -cpumask_t cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); static cpumask_t smp_commenced_mask; -/* Per CPU bogomips and other parameters */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); -EXPORT_PER_CPU_SYMBOL(cpu_info); - /* which logical CPU number maps to which CPU (physical APIC ID) */ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 953b0ff72b65..c51279f05316 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -60,42 +60,9 @@ #include #include -/* Number of siblings per CPU package */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - -/* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; - -/* Bitmask of currently online CPUs */ -cpumask_t cpu_online_map __read_mostly; - -EXPORT_SYMBOL(cpu_online_map); - -/* - * Private maps to synchronize booting between AP and BP. - * Probably not needed anymore, but it makes for easier debugging. 
-AK - */ -cpumask_t cpu_callin_map; -cpumask_t cpu_callout_map; -cpumask_t cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); - -/* Per CPU bogomips and other parameters */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); -EXPORT_PER_CPU_SYMBOL(cpu_info); - /* Set when the idlers are all forked */ int smp_threads_ready; -/* representing HT siblings of each logical CPU */ -DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); -EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); - -/* representing HT and core siblings of each logical CPU */ -DEFINE_PER_CPU(cpumask_t, cpu_core_map); -EXPORT_PER_CPU_SYMBOL(cpu_core_map); - /* * Trampoline 80x86 program as an array. */ diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index e5bc1be70827..1ecf8134bdc9 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -14,6 +14,7 @@ #include extern cpumask_t cpu_initialized; +extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -- cgit v1.2.1 From 1452207689b3c0dd2ffed40735289a3a4a8c0c7c Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:12:59 -0300 Subject: x86: make set_cpu_sibling_map nonstatic And move its extern definition to smp.h, the common header Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 3 --- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index c51279f05316..1e8f00a1d624 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -228,7 +228,7 @@ cpumask_t cpu_coregroup_map(int cpu) /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; -static inline void set_cpu_sibling_map(int cpu) +void __cpuinit set_cpu_sibling_map(int cpu) { int i; struct cpuinfo_x86 *c = &cpu_data(cpu); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 2ab8ed4e99e0..1b4481aeb5c8 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -22,6 +22,9 @@ struct smp_ops { int wait); }; +/* Globals due to paravirt */ +extern void set_cpu_sibling_map(int cpu); + #ifdef CONFIG_SMP extern struct smp_ops smp_ops; diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 29f61952ea8c..76247a947a5f 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -35,9 +35,6 @@ extern void cpu_uninit(void); extern void remove_siblinginfo(int cpu); #endif -/* Globals due to paravirt */ -extern void set_cpu_sibling_map(int cpu); - #ifdef CONFIG_SMP #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -- cgit v1.2.1 From 61d5989973cc52b0ef0f781e870dfe5da6d5023e Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:00 -0300 Subject: x86: make remove_siblinginfo non-static this is done to match i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 1e8f00a1d624..20f1c7df86a3 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -918,7 +918,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) #ifdef CONFIG_HOTPLUG_CPU -static void remove_siblinginfo(int cpu) +void remove_siblinginfo(int cpu) { int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); -- cgit v1.2.1 From 
1dbb4726faebe9e64a1e9cf40e3b39fffa065a65 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:01 -0300 Subject: x86: move hotplug related extern definitions to smp.h definitions that are inside CONFIG_HOTPLUG_CPU in the arch-specific smp*.h files are moved to common header Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 6 ++++++ include/asm-x86/smp_32.h | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 1b4481aeb5c8..c800b815d378 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -80,6 +80,12 @@ extern void prefill_possible_map(void); # include "smp_64.h" #endif +#ifdef CONFIG_HOTPLUG_CPU +extern void cpu_exit_clear(void); +extern void cpu_uninit(void); +extern void remove_siblinginfo(int cpu); +#endif + extern void smp_alloc_memory(void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 76247a947a5f..0b2513468870 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -29,12 +29,6 @@ DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -#ifdef CONFIG_HOTPLUG_CPU -extern void cpu_exit_clear(void); -extern void cpu_uninit(void); -extern void remove_siblinginfo(int cpu); -#endif - #ifdef CONFIG_SMP #ifndef CONFIG_PARAVIRT #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -- cgit v1.2.1 From 768d95051bdaf60b4eb89b42c133b14627f478f2 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:02 -0300 Subject: x86: move sibling functions to common file set_cpu_sibling_map() and remove_sibling_info() are equal between architectures, and are now moved to common file Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 88 +++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 88 ------------------------------------------- arch/x86/kernel/smpboot_64.c | 89 -------------------------------------------- 3 files changed, 88 insertions(+), 177 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 40a3b56952ef..d774520a6b48 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -29,7 +29,95 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); /* Per CPU bogomips and other parameters */ DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); + +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + +void __cpuinit set_cpu_sibling_map(int cpu) +{ + int i; + struct cpuinfo_x86 *c = &cpu_data(cpu); + + cpu_set(cpu, cpu_sibling_setup_map); + + if (smp_num_siblings > 1) { + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (c->phys_proc_id == cpu_data(i).phys_proc_id && + c->cpu_core_id == cpu_data(i).cpu_core_id) { + cpu_set(i, per_cpu(cpu_sibling_map, cpu)); + cpu_set(cpu, per_cpu(cpu_sibling_map, i)); + cpu_set(i, per_cpu(cpu_core_map, cpu)); + cpu_set(cpu, per_cpu(cpu_core_map, i)); + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); + } + } + } else { + cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); + } + + cpu_set(cpu, c->llc_shared_map); + + if (current_cpu_data.x86_max_cores == 1) { + per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); + c->booted_cores = 1; + return; + } + + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if 
(per_cpu(cpu_llc_id, cpu) != BAD_APICID && + per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { + cpu_set(i, c->llc_shared_map); + cpu_set(cpu, cpu_data(i).llc_shared_map); + } + if (c->phys_proc_id == cpu_data(i).phys_proc_id) { + cpu_set(i, per_cpu(cpu_core_map, cpu)); + cpu_set(cpu, per_cpu(cpu_core_map, i)); + /* + * Does this new cpu bringup a new core? + */ + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { + /* + * for each core in package, increment + * the booted_cores for this new cpu + */ + if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) + c->booted_cores++; + /* + * increment the core count for all + * the other cpus in this package + */ + if (i != cpu) + cpu_data(i).booted_cores++; + } else if (i != cpu && !c->booted_cores) + c->booted_cores = cpu_data(i).booted_cores; + } + } +} + #ifdef CONFIG_HOTPLUG_CPU +void remove_siblinginfo(int cpu) +{ + int sibling; + struct cpuinfo_x86 *c = &cpu_data(cpu); + + for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { + cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); + /*/ + * last thread sibling in this cpu core going down + */ + if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) + cpu_data(sibling).booted_cores--; + } + + for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) + cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); + cpus_clear(per_cpu(cpu_sibling_map, cpu)); + cpus_clear(per_cpu(cpu_core_map, cpu)); + c->phys_proc_id = 0; + c->cpu_core_id = 0; + cpu_clear(cpu, cpu_sibling_setup_map); +} int additional_cpus __initdata = -1; diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 0fbc98163b4e..322f46674d42 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -274,71 +274,6 @@ cpumask_t cpu_coregroup_map(int cpu) return c->llc_shared_map; } -/* representing cpus for which sibling maps can be computed */ -static cpumask_t cpu_sibling_setup_map; - -void __cpuinit set_cpu_sibling_map(int cpu) -{ - int i; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - cpu_set(cpu, cpu_sibling_setup_map); - - if (smp_num_siblings > 1) { - for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (c->phys_proc_id == cpu_data(i).phys_proc_id && - c->cpu_core_id == cpu_data(i).cpu_core_id) { - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); - } - } - } else { - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); - } - - cpu_set(cpu, c->llc_shared_map); - - if (current_cpu_data.x86_max_cores == 1) { - per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); - c->booted_cores = 1; - return; - } - - for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && - per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); - } - if (c->phys_proc_id == cpu_data(i).phys_proc_id) { - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - /* - * Does this new cpu bringup a new core? 
- */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { - /* - * for each core in package, increment - * the booted_cores for this new cpu - */ - if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) - c->booted_cores++; - /* - * increment the core count for all - * the other cpus in this package - */ - if (i != cpu) - cpu_data(i).booted_cores++; - } else if (i != cpu && !c->booted_cores) - c->booted_cores = cpu_data(i).booted_cores; - } - } -} - /* * Activate a secondary processor. */ @@ -1120,29 +1055,6 @@ void __init native_smp_prepare_boot_cpu(void) } #ifdef CONFIG_HOTPLUG_CPU -void remove_siblinginfo(int cpu) -{ - int sibling; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { - cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); - /*/ - * last thread sibling in this cpu core going down - */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) - cpu_data(sibling).booted_cores--; - } - - for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); - c->phys_proc_id = 0; - c->cpu_core_id = 0; - cpu_clear(cpu, cpu_sibling_setup_map); -} - int __cpu_disable(void) { cpumask_t map = cpu_online_map; diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 20f1c7df86a3..329f9c53a335 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -225,71 +225,6 @@ cpumask_t cpu_coregroup_map(int cpu) return c->llc_shared_map; } -/* representing cpus for which sibling maps can be computed */ -static cpumask_t cpu_sibling_setup_map; - -void __cpuinit set_cpu_sibling_map(int cpu) -{ - int i; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - cpu_set(cpu, cpu_sibling_setup_map); - - if (smp_num_siblings > 1) { - for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (c->phys_proc_id == cpu_data(i).phys_proc_id && - c->cpu_core_id == cpu_data(i).cpu_core_id) { - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); - } - } - } else { - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); - } - - cpu_set(cpu, c->llc_shared_map); - - if (current_cpu_data.x86_max_cores == 1) { - per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); - c->booted_cores = 1; - return; - } - - for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && - per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); - } - if (c->phys_proc_id == cpu_data(i).phys_proc_id) { - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - /* - * Does this new cpu bringup a new core? 
- */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { - /* - * for each core in package, increment - * the booted_cores for this new cpu - */ - if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) - c->booted_cores++; - /* - * increment the core count for all - * the other cpus in this package - */ - if (i != cpu) - cpu_data(i).booted_cores++; - } else if (i != cpu && !c->booted_cores) - c->booted_cores = cpu_data(i).booted_cores; - } - } -} - /* * Setup code on secondary processor (after comming out of the trampoline) */ @@ -917,30 +852,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus) } #ifdef CONFIG_HOTPLUG_CPU - -void remove_siblinginfo(int cpu) -{ - int sibling; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { - cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); - /* - * last thread sibling in this cpu core going down - */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) - cpu_data(sibling).booted_cores--; - } - - for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); - c->phys_proc_id = 0; - c->cpu_core_id = 0; - cpu_clear(cpu, cpu_sibling_setup_map); -} - static void __ref remove_cpu_from_maps(void) { int cpu = smp_processor_id(); -- cgit v1.2.1 From 70708a18e834fd709a4f497bb419ec84d1eb3511 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:03 -0300 Subject: x86: move cpu_coregroup_map to common file it is equal between architectures Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 16 ++++++++++++++++ arch/x86/kernel/smpboot_32.c | 14 -------------- arch/x86/kernel/smpboot_64.c | 14 -------------- 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index d774520a6b48..644e60969f90 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1,6 +1,7 @@ #include #include #include +#include /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -95,6 +96,21 @@ void __cpuinit set_cpu_sibling_map(int cpu) } } +/* maps the cpu to the sched domain representing multi-core */ +cpumask_t cpu_coregroup_map(int cpu) +{ + struct cpuinfo_x86 *c = &cpu_data(cpu); + /* + * For perf, we return last level cache shared map. + * And for power savings, we return cpu_core_map + */ + if (sched_mc_power_savings || sched_smt_power_savings) + return per_cpu(cpu_core_map, cpu); + else + return c->llc_shared_map; +} + + #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 322f46674d42..a58ca7f18013 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -260,20 +260,6 @@ static void __cpuinit smp_callin(void) static int cpucount; -/* maps the cpu to the sched domain representing multi-core */ -cpumask_t cpu_coregroup_map(int cpu) -{ - struct cpuinfo_x86 *c = &cpu_data(cpu); - /* - * For perf, we return last level cache shared map. - * And for power savings, we return cpu_core_map - */ - if (sched_mc_power_savings || sched_smt_power_savings) - return per_cpu(cpu_core_map, cpu); - else - return c->llc_shared_map; -} - /* * Activate a secondary processor. 
 */
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 329f9c53a335..1a592400d94f 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -211,20 +211,6 @@ void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
-/* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	/*
-	 * For perf, we return last level cache shared map.
-	 * And for power savings, we return cpu_core_map
-	 */
-	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
-	else
-		return c->llc_shared_map;
-}
-
 /*
  * Setup code on secondary processor (after comming out of the trampoline)
  */
-- 
cgit v1.2.1

From fc25da9ec6c910976b76c70f7604a838679f75b2 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Mon, 3 Mar 2008 14:13:04 -0300
Subject: x86: remove vector_lock around cpu_online_map

This lock does not protect cpu_online_map, so the region it covers can
be shortened and, in some cases, removed entirely.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot_64.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 1a592400d94f..ca3a3c5b64fe 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -261,9 +261,9 @@ void __cpuinit start_secondary(void)
 	/*
 	 * Allow the master to continue.
 	 */
+	spin_unlock(&vector_lock);
 	cpu_set(smp_processor_id(), cpu_online_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-	spin_unlock(&vector_lock);
 
 	unlock_ipi_call_lock();
 
@@ -879,10 +879,8 @@ int __cpu_disable(void)
 	local_irq_disable();
 	remove_siblinginfo(cpu);
 
-	spin_lock(&vector_lock);
 	/* It's now safe to remove this processor from the online map */
 	cpu_clear(cpu, cpu_online_map);
-	spin_unlock(&vector_lock);
 	remove_cpu_from_maps();
 	fixup_irqs(cpu_online_map);
 	return 0;
-- 
cgit v1.2.1

From 045f9d22029e94d6609d46f8ee07c63f4693dfb3 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Mon, 3 Mar 2008 14:13:05 -0300
Subject: x86: use remove_from_maps in cpu_disable

it is already used in x86_64.
In i386, it only removes from cpu_online_map Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 7 ++++++- arch/x86/kernel/smpboot_64.c | 8 +++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index a58ca7f18013..4939b3a01b24 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -1041,6 +1041,11 @@ void __init native_smp_prepare_boot_cpu(void) } #ifdef CONFIG_HOTPLUG_CPU +static void __ref remove_cpu_from_maps(int cpu) +{ + cpu_clear(cpu, cpu_online_map); +} + int __cpu_disable(void) { cpumask_t map = cpu_online_map; @@ -1066,7 +1071,7 @@ int __cpu_disable(void) remove_siblinginfo(cpu); - cpu_clear(cpu, map); + remove_cpu_from_maps(cpu); fixup_irqs(map); /* It's now safe to remove this processor from the online map */ cpu_clear(cpu, cpu_online_map); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index ca3a3c5b64fe..6509d3c1b3df 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -838,10 +838,9 @@ void __init native_smp_cpus_done(unsigned int max_cpus) } #ifdef CONFIG_HOTPLUG_CPU -static void __ref remove_cpu_from_maps(void) +static void __ref remove_cpu_from_maps(int cpu) { - int cpu = smp_processor_id(); - + cpu_clear(cpu, cpu_online_map); cpu_clear(cpu, cpu_callout_map); cpu_clear(cpu, cpu_callin_map); clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */ @@ -880,8 +879,7 @@ int __cpu_disable(void) remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ - cpu_clear(cpu, cpu_online_map); - remove_cpu_from_maps(); + remove_cpu_from_maps(cpu); fixup_irqs(cpu_online_map); return 0; } -- cgit v1.2.1 From e9a6cb96fafa4d4df2033ab6cf9c817f6f47e052 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:06 -0300 Subject: x86: do not clear cpu_online_map it was already cleared two lines above, and so, this removal is bogus Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 4939b3a01b24..3d21c663aa76 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -1073,8 +1073,7 @@ int __cpu_disable(void) remove_cpu_from_maps(cpu); fixup_irqs(map); - /* It's now safe to remove this processor from the online map */ - cpu_clear(cpu, cpu_online_map); + return 0; } -- cgit v1.2.1 From 69c18c15d39c4622c6e2f97e5db4d8c9c43adaaa Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:07 -0300 Subject: x86: merge __cpu_disable and cpu_die They are now equal, and are moved to a common file Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 85 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 67 ---------------------------------- arch/x86/kernel/smpboot_64.c | 79 ---------------------------------------- include/asm-x86/smp.h | 3 ++ include/asm-x86/smp_32.h | 3 -- include/asm-x86/smp_64.h | 3 -- 6 files changed, 88 insertions(+), 152 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 644e60969f90..c35cd319d1ed 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -2,6 +2,13 @@ #include #include #include +#include + +#include +#include +#include +#include +#include /* Number of siblings per CPU package */ int smp_num_siblings 
= 1; @@ -181,5 +188,83 @@ __init void prefill_possible_map(void) for (i = 0; i < possible; i++) cpu_set(i, cpu_possible_map); } + +static void __ref remove_cpu_from_maps(int cpu) +{ + cpu_clear(cpu, cpu_online_map); +#ifdef CONFIG_X86_64 + cpu_clear(cpu, cpu_callout_map); + cpu_clear(cpu, cpu_callin_map); + /* was set by cpu_init() */ + clear_bit(cpu, (unsigned long *)&cpu_initialized); + clear_node_cpumask(cpu); +#endif +} + +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + + /* + * Perhaps use cpufreq to drop frequency, but that could go + * into generic code. + * + * We won't take down the boot processor on i386 due to some + * interrupts only being able to be serviced by the BSP. + * Especially so if we're not using an IOAPIC -zwane + */ + if (cpu == 0) + return -EBUSY; + + if (nmi_watchdog == NMI_LOCAL_APIC) + stop_apic_nmi_watchdog(NULL); + clear_local_APIC(); + + /* + * HACK: + * Allow any queued timer interrupts to get serviced + * This is only a temporary solution until we cleanup + * fixup_irqs as we do for IA64. + */ + local_irq_enable(); + mdelay(1); + + local_irq_disable(); + remove_siblinginfo(cpu); + + /* It's now safe to remove this processor from the online map */ + remove_cpu_from_maps(cpu); + fixup_irqs(cpu_online_map); + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* We don't do anything here: idle task is faking death itself. */ + unsigned int i; + + for (i = 0; i < 10; i++) { + /* They ack this in play_dead by setting CPU_DEAD */ + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + printk(KERN_INFO "CPU %d is now offline\n", cpu); + if (1 == num_online_cpus()) + alternatives_smp_switch(0); + return; + } + msleep(100); + } + printk(KERN_ERR "CPU %u didn't die...\n", cpu); +} +#else /* ... !CONFIG_HOTPLUG_CPU */ +int __cpu_disable(void) +{ + return -ENOSYS; +} + +void __cpu_die(unsigned int cpu) +{ + /* We said "no" in __cpu_disable */ + BUG(); +} #endif diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 3d21c663aa76..00b1b59cd560 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -1040,73 +1040,6 @@ void __init native_smp_prepare_boot_cpu(void) __get_cpu_var(cpu_state) = CPU_ONLINE; } -#ifdef CONFIG_HOTPLUG_CPU -static void __ref remove_cpu_from_maps(int cpu) -{ - cpu_clear(cpu, cpu_online_map); -} - -int __cpu_disable(void) -{ - cpumask_t map = cpu_online_map; - int cpu = smp_processor_id(); - - /* - * Perhaps use cpufreq to drop frequency, but that could go - * into generic code. - * - * We won't take down the boot processor on i386 due to some - * interrupts only being able to be serviced by the BSP. - * Especially so if we're not using an IOAPIC -zwane - */ - if (cpu == 0) - return -EBUSY; - if (nmi_watchdog == NMI_LOCAL_APIC) - stop_apic_nmi_watchdog(NULL); - clear_local_APIC(); - /* Allow any queued timer interrupts to get serviced */ - local_irq_enable(); - mdelay(1); - local_irq_disable(); - - remove_siblinginfo(cpu); - - remove_cpu_from_maps(cpu); - fixup_irqs(map); - - return 0; -} - -void __cpu_die(unsigned int cpu) -{ - /* We don't do anything here: idle task is faking death itself. */ - unsigned int i; - - for (i = 0; i < 10; i++) { - /* They ack this in play_dead by setting CPU_DEAD */ - if (per_cpu(cpu_state, cpu) == CPU_DEAD) { - printk ("CPU %d is now offline\n", cpu); - if (1 == num_online_cpus()) - alternatives_smp_switch(0); - return; - } - msleep(100); - } - printk(KERN_ERR "CPU %u didn't die...\n", cpu); -} -#else /* ... 
!CONFIG_HOTPLUG_CPU */ -int __cpu_disable(void) -{ - return -ENOSYS; -} - -void __cpu_die(unsigned int cpu) -{ - /* We said "no" in __cpu_disable */ - BUG(); -} -#endif /* CONFIG_HOTPLUG_CPU */ - int __cpuinit native_cpu_up(unsigned int cpu) { unsigned long flags; diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 6509d3c1b3df..0c67e5ae9c9d 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -836,82 +836,3 @@ void __init native_smp_cpus_done(unsigned int max_cpus) setup_ioapic_dest(); check_nmi_watchdog(); } - -#ifdef CONFIG_HOTPLUG_CPU -static void __ref remove_cpu_from_maps(int cpu) -{ - cpu_clear(cpu, cpu_online_map); - cpu_clear(cpu, cpu_callout_map); - cpu_clear(cpu, cpu_callin_map); - clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */ - clear_node_cpumask(cpu); -} - -int __cpu_disable(void) -{ - int cpu = smp_processor_id(); - - /* - * Perhaps use cpufreq to drop frequency, but that could go - * into generic code. - * - * We won't take down the boot processor on i386 due to some - * interrupts only being able to be serviced by the BSP. - * Especially so if we're not using an IOAPIC -zwane - */ - if (cpu == 0) - return -EBUSY; - - if (nmi_watchdog == NMI_LOCAL_APIC) - stop_apic_nmi_watchdog(NULL); - clear_local_APIC(); - - /* - * HACK: - * Allow any queued timer interrupts to get serviced - * This is only a temporary solution until we cleanup - * fixup_irqs as we do for IA64. - */ - local_irq_enable(); - mdelay(1); - - local_irq_disable(); - remove_siblinginfo(cpu); - - /* It's now safe to remove this processor from the online map */ - remove_cpu_from_maps(cpu); - fixup_irqs(cpu_online_map); - return 0; -} - -void __cpu_die(unsigned int cpu) -{ - /* We don't do anything here: idle task is faking death itself. */ - unsigned int i; - - for (i = 0; i < 10; i++) { - /* They ack this in play_dead by setting CPU_DEAD */ - if (per_cpu(cpu_state, cpu) == CPU_DEAD) { - printk ("CPU %d is now offline\n", cpu); - if (1 == num_online_cpus()) - alternatives_smp_switch(0); - return; - } - msleep(100); - } - printk(KERN_ERR "CPU %u didn't die...\n", cpu); -} - -#else /* ... !CONFIG_HOTPLUG_CPU */ - -int __cpu_disable(void) -{ - return -ENOSYS; -} - -void __cpu_die(unsigned int cpu) -{ - /* We said "no" in __cpu_disable */ - BUG(); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c800b815d378..27d9f6595232 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -70,6 +70,9 @@ void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int native_cpu_up(unsigned int cpunum); +extern int __cpu_disable(void); +extern void __cpu_die(unsigned int cpu); + extern unsigned disabled_cpus; extern void prefill_possible_map(void); #endif diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 0b2513468870..4fec2feb6ac8 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -34,9 +34,6 @@ DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) #endif -extern int __cpu_disable(void); -extern void __cpu_die(unsigned int cpu); - /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. 
We map APIC_BASE very early in page_setup(),
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 1ecf8134bdc9..d554d7d57327 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -42,9 +42,6 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 
 #define SMP_TRAMPOLINE_BASE 0x6000
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
 #define raw_smp_processor_id() read_pda(cpunumber)
 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-- 
cgit v1.2.1

From 89b08200ad8bc8fb860da218c4f3bcc292bf286c Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Mon, 3 Mar 2008 14:13:08 -0300
Subject: x86: make x86_64 accept the max_cpus parameter

The parameter parsing is done in the common smpboot.c

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/mpparse_64.c |  7 +++++++
 arch/x86/kernel/smpboot.c    | 12 ++++++++++++
 arch/x86/kernel/smpboot_32.c | 13 -------------
 3 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 72ab1403fed7..2a1f7881c75b 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -32,6 +32,7 @@
 
 /* Have we found an MP table */
 int smp_found_config;
+unsigned int __cpuinitdata maxcpus = NR_CPUS;
 
 /*
  * Various Linux-internal data structures created from the
@@ -115,6 +116,12 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 		return;
 	}
 
+	if (num_processors >= maxcpus) {
+		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
+			" Processor ignored.\n", maxcpus);
+		return;
+	}
+
 	num_processors++;
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c35cd319d1ed..34c31178041b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -268,3 +268,15 @@ void __cpu_die(unsigned int cpu)
 }
 #endif
+
+/*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+static int __init parse_maxcpus(char *arg)
+{
+	extern unsigned int maxcpus;
+
+	maxcpus = simple_strtoul(arg, NULL, 0);
+	return 0;
+}
+early_param("maxcpus", parse_maxcpus);
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 00b1b59cd560..3236e843a9ad 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -1113,16 +1113,3 @@ void __init smp_intr_init(void)
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 }
-
-/*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */ -static int __init parse_maxcpus(char *arg) -{ - extern unsigned int maxcpus; - - maxcpus = simple_strtoul(arg, NULL, 0); - return 0; -} -early_param("maxcpus", parse_maxcpus); -- cgit v1.2.1 From 420688293927a590d092ec76ef97c2565ae21aff Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:09 -0300 Subject: x86: move trampoline arrays extern definition to smp.h In here, they can serve both architectures Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 6 ------ arch/x86/kernel/smpboot_64.c | 7 ------- include/asm-x86/smp.h | 6 ++++++ 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 3236e843a9ad..a21f25418b3e 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -73,12 +73,6 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u8 apicid_2_node[MAX_APICID]; -/* - * Trampoline 80x86 program as an array. - */ - -extern const unsigned char trampoline_data []; -extern const unsigned char trampoline_end []; static unsigned char *trampoline_base; static void map_cpu_to_logical_apicid(void); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 0c67e5ae9c9d..2cc1b8b0601c 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -63,13 +63,6 @@ /* Set when the idlers are all forked */ int smp_threads_ready; -/* - * Trampoline 80x86 program as an array. - */ - -extern const unsigned char trampoline_data[]; -extern const unsigned char trampoline_end[]; - /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 27d9f6595232..b2a1697e4700 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -9,6 +9,12 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +/* + * Trampoline 80x86 program as an array. + */ +extern const unsigned char trampoline_data []; +extern const unsigned char trampoline_end []; + struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); -- cgit v1.2.1 From d507897b2f179a9b30ce2f91b768ed2ee84575bc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:10 -0300 Subject: x86: adapt voyager's trampoline_base Change voyager's trampoline base to unsigned char * instead of u32. This way, it won't conflict with the other architectures when including smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/mach-voyager/voyager_smp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 3cc8eb2f36a9..20b7ce697ee8 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -210,7 +210,7 @@ static int cpucount = 0; /* steal a page from the bottom of memory for the trampoline and * squirrel its address away here. 
This will be in kernel virtual * space */ -static __u32 trampoline_base; +unsigned char *trampoline_base; /* The per cpu profile stuff - used in smp_local_timer_interrupt */ static DEFINE_PER_CPU(int, prof_multiplier) = 1; @@ -435,9 +435,9 @@ static __u32 __init setup_trampoline(void) extern const __u8 trampoline_end[]; extern const __u8 trampoline_data[]; - memcpy((__u8 *) trampoline_base, trampoline_data, + memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); - return virt_to_phys((__u8 *) trampoline_base); + return virt_to_phys(trampoline_base); } /* Routine initially called when a non-boot CPU is brought online */ @@ -1166,7 +1166,7 @@ void flush_tlb_all(void) * is sorted out */ void __init smp_alloc_memory(void) { - trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE); + trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); if (__pa(trampoline_base) >= 0x93000) BUG(); } -- cgit v1.2.1 From da522b07293756b9cb4e2c570454f95b8e79e189 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:11 -0300 Subject: x86: adapt voyager's setup_trampoline make setup_trampoline non-static. This way, it won't conflict with the extern declaration in smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/mach-voyager/voyager_smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 20b7ce697ee8..11b806c7ed94 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -429,7 +429,7 @@ void __init smp_store_cpu_info(int id) } /* set up the trampoline and return the physical address of the code */ -static __u32 __init setup_trampoline(void) +unsigned long __init setup_trampoline(void) { /* these two are global symbols in trampoline.S */ extern const __u8 trampoline_end[]; -- cgit v1.2.1 From 91718e8d13c23bfe0aa6fa6b730c5c33ee9771bf Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:12 -0300 Subject: x86: unify setup_trampoline setup_trampoline() looks very similar between architectures, and this patch unifies them. The i386 version allocates bootmem memory, while the x86_64 version uses a fixed address. In this patch, we initialize the global trampoline_base to the x86_64 version, and i386 allocation can later override it. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 33 +++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 29 ----------------------------- arch/x86/kernel/smpboot_64.c | 14 -------------- include/asm-x86/smp.h | 4 ++++ include/asm-x86/smp_64.h | 2 -- 5 files changed, 37 insertions(+), 45 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 34c31178041b..b13b9d55f9ce 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -38,6 +39,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); +/* ready for x86_64, no harm for x86, since it will overwrite after alloc */ +unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE); + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; @@ -117,6 +121,35 @@ cpumask_t cpu_coregroup_map(int cpu) return c->llc_shared_map; } +/* + * Currently trivial. Write the real->protected mode + * bootstrap into the page concerned. 
The caller + * has made sure it's suitably aligned. + */ + +unsigned long __cpuinit setup_trampoline(void) +{ + memcpy(trampoline_base, trampoline_data, + trampoline_end - trampoline_data); + return virt_to_phys(trampoline_base); +} + +#ifdef CONFIG_X86_32 +/* + * We are called very early to get the low memory for the + * SMP bootup trampoline page. + */ +void __init smp_alloc_memory(void) +{ + trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); + /* + * Has to be in very low memory so we can execute + * real-mode AP code. + */ + if (__pa(trampoline_base) >= 0x9F000) + BUG(); +} +#endif #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index a21f25418b3e..ee826594aa03 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -73,40 +73,11 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u8 apicid_2_node[MAX_APICID]; -static unsigned char *trampoline_base; - static void map_cpu_to_logical_apicid(void); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -/* - * Currently trivial. Write the real->protected mode - * bootstrap into the page concerned. The caller - * has made sure it's suitably aligned. - */ - -static unsigned long __cpuinit setup_trampoline(void) -{ - memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); - return virt_to_phys(trampoline_base); -} - -/* - * We are called very early to get the low memory for the - * SMP bootup trampoline page. - */ -void __init smp_alloc_memory(void) -{ - trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); - /* - * Has to be in very low memory so we can execute - * real-mode AP code. - */ - if (__pa(trampoline_base) >= 0x9F000) - BUG(); -} - /* * The bootstrap kernel entry code has set these up. Save them for * a given CPU diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 2cc1b8b0601c..9f4935e70e72 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -85,20 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) #endif - -/* - * Currently trivial. Write the real->protected mode - * bootstrap into the page concerned. The caller - * has made sure it's suitably aligned. - */ - -static unsigned long __cpuinit setup_trampoline(void) -{ - void *tramp = __va(SMP_TRAMPOLINE_BASE); - memcpy(tramp, trampoline_data, trampoline_end - trampoline_data); - return virt_to_phys(tramp); -} - /* * The bootstrap kernel entry code has set these up. 
Save them for * a given CPU diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b2a1697e4700..513c8571a4a0 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -14,6 +14,7 @@ extern unsigned int num_processors; */ extern const unsigned char trampoline_data []; extern const unsigned char trampoline_end []; +extern unsigned char *trampoline_base; struct smp_ops { void (*smp_prepare_boot_cpu)(void); @@ -81,6 +82,9 @@ extern void __cpu_die(unsigned int cpu); extern unsigned disabled_cpus; extern void prefill_possible_map(void); + +#define SMP_TRAMPOLINE_BASE 0x6000 +extern unsigned long setup_trampoline(void); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index d554d7d57327..394c78524331 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -40,8 +40,6 @@ static inline int cpu_present_to_apicid(int mps_cpu) #ifdef CONFIG_SMP -#define SMP_TRAMPOLINE_BASE 0x6000 - #define raw_smp_processor_id() read_pda(cpunumber) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -- cgit v1.2.1 From e90009bcc1137c51d677262417f16c00ad2ce9a9 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:13 -0300 Subject: x86: use wait_for_init_deassert in x86_64 wraps the busy loop for wait_for_init_deasserted() in a function, so smp_callin in x86_64 looks like more i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 9f4935e70e72..4f6d9768648f 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -100,6 +100,13 @@ static void __cpuinit smp_store_cpu_info(int id) print_cpu_info(c); } +static inline void wait_for_init_deassert(atomic_t *deassert) +{ + while (!atomic_read(deassert)) + cpu_relax(); + return; +} + static atomic_t init_deasserted __cpuinitdata; /* @@ -117,8 +124,7 @@ void __cpuinit smp_callin(void) * our local APIC. We have to wait for the IPI or we'll * lock up on an APIC access. */ - while (!atomic_read(&init_deasserted)) - cpu_relax(); + wait_for_init_deassert(&init_deasserted); /* * (This works even if the APIC is not enabled.) -- cgit v1.2.1 From e104383fbf26570968cbf060955f67cd5378300a Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 3 Mar 2008 14:13:14 -0300 Subject: x86: use cpu_relax instead of rep_nop This is done for smpboot_32.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index ee826594aa03..2dd95bae2b96 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -185,7 +185,7 @@ static void __cpuinit smp_callin(void) */ if (cpu_isset(cpuid, cpu_callout_map)) break; - rep_nop(); + cpu_relax(); } if (!time_before(jiffies, timeout)) { @@ -242,7 +242,7 @@ static void __cpuinit start_secondary(void *unused) preempt_disable(); smp_callin(); while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) - rep_nop(); + cpu_relax(); /* * Check TSC synchronization with the BP: */ -- cgit v1.2.1 From ca9cda2f7b53da619fabde4c0c1bd5f61039bd5b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 15:15:42 +0100 Subject: x86: add comments to processor.h add comments to the FPU structures of processor.h. 
Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 51 +++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index e49e5e69ebb0..1f9501a38493 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -289,42 +289,47 @@ struct orig_ist { #define MXCSR_DEFAULT 0x1f80 struct i387_fsave_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 cwd; /* FPU Control Word */ + u32 swd; /* FPU Status Word */ + u32 twd; /* FPU Tag Word */ + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Pointer Offset */ + u32 fos; /* FPU Operand Pointer Selector */ + + /* 8*10 bytes for each FP-reg = 80 bytes: */ u32 st_space[20]; - /* Software status information: */ + + /* Software status information [not touched by FSAVE ]: */ u32 status; }; struct i387_fxsave_struct { - u16 cwd; - u16 swd; - u16 twd; - u16 fop; + u16 cwd; /* Control Word */ + u16 swd; /* Status Word */ + u16 twd; /* Tag Word */ + u16 fop; /* Last Instruction Opcode */ union { struct { - u64 rip; - u64 rdp; + u64 rip; /* Instruction Pointer */ + u64 rdp; /* Data Pointer */ }; struct { - u32 fip; - u32 fcs; - u32 foo; - u32 fos; + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Offset */ + u32 fos; /* FPU Operand Selector */ }; }; - u32 mxcsr; - u32 mxcsr_mask; - /* 8*16 bytes for each FP-reg = 128 bytes: */ + u32 mxcsr; /* MXCSR Register State */ + u32 mxcsr_mask; /* MXCSR Mask */ + + /* 8*16 bytes for each FP-reg = 128 bytes: */ u32 st_space[32]; - /* 16*16 bytes for each XMM-reg = 256 bytes: */ + + /* 16*16 bytes for each XMM-reg = 256 bytes: */ u32 xmm_space[64]; + u32 padding[24]; } __attribute__((aligned(16))); -- cgit v1.2.1 From f668964ea1485c64cc9ab0721679fe9cd90cc406 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 5 Mar 2008 15:37:32 +0100 Subject: x86: clean up i387.c minor coding style cleanups. 
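One of the functions touched below is worth a gloss: twd_i387_to_fxsr()
compresses the i387 tag word (2 bits per FP register, 11 = empty) into
the FXSR tag (1 bit per register, 1 = occupied). A stand-alone rendering
with a worked value (editor's sketch; the leading ~twd / 0x5555 step is
reconstructed from the function's usual form and is not visible in the
hunk below):

#include <stdio.h>

static unsigned short twd_i387_to_fxsr_demo(unsigned short twd)
{
	unsigned int tmp;

	tmp = ~twd;                        /* empty pairs (11) become 00   */
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V: 1 = in use */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV             */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV             */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV             */
	return tmp;
}

int main(void)
{
	/* ST0 tagged valid (00), ST1..ST7 empty (11): 0xfffc -> 0x0001 */
	printf("%#06x\n", twd_i387_to_fxsr_demo(0xfffc));
	return 0;
}

The checkpatch numbers for the cleanup itself: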
Before: total: 0 errors, 3 warnings, 479 lines checked After: total: 0 errors, 1 warnings, 483 lines checked No code changed: arch/x86/kernel/i387.o: text data bss dec hex filename 2379 4 8 2391 957 i387.o.before 2379 4 8 2391 957 i387.o.after md5: e1434553a3b4ff1f52ad97a68b1fad8a i387.o.before.asm e1434553a3b4ff1f52ad97a68b1fad8a i387.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/i387.c | 88 ++++++++++++++++++++++++++------------------------ 1 file changed, 46 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index d2e39e69aaf8..8f8102d967b3 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -5,45 +5,41 @@ * General FPU state handling cleanups * Gareth Hughes , May 2000 */ - -#include #include #include +#include + +#include #include -#include #include -#include -#include -#include #include +#include +#include +#include #ifdef CONFIG_X86_64 - -#include -#include - +# include +# include #else - -#define save_i387_ia32 save_i387 -#define restore_i387_ia32 restore_i387 - -#define _fpstate_ia32 _fpstate -#define user_i387_ia32_struct user_i387_struct -#define user32_fxsr_struct user_fxsr_struct - +# define save_i387_ia32 save_i387 +# define restore_i387_ia32 restore_i387 +# define _fpstate_ia32 _fpstate +# define user_i387_ia32_struct user_i387_struct +# define user32_fxsr_struct user_fxsr_struct #endif #ifdef CONFIG_MATH_EMULATION -#define HAVE_HWFP (boot_cpu_data.hard_math) +# define HAVE_HWFP (boot_cpu_data.hard_math) #else -#define HAVE_HWFP 1 +# define HAVE_HWFP 1 #endif -static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; +static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; void mxcsr_feature_mask_init(void) { unsigned long mask = 0; + clts(); if (cpu_has_fxsr) { memset(¤t->thread.i387.fxsave, 0, @@ -69,10 +65,11 @@ void __cpuinit fpu_init(void) if (offsetof(struct task_struct, thread.i387.fxsave) & 15) __bad_fxsave_alignment(); + set_in_cr4(X86_CR4_OSFXSR); set_in_cr4(X86_CR4_OSXMMEXCPT); - write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */ + write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ mxcsr_feature_mask_init(); /* clean state in init */ @@ -178,6 +175,7 @@ static inline unsigned short twd_i387_to_fxsr(unsigned short twd) tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ + return tmp; } @@ -232,8 +230,8 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) * FXSR floating point environment conversions. */ -static void convert_from_fxsr(struct user_i387_ia32_struct *env, - struct task_struct *tsk) +static void +convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave; struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; @@ -252,10 +250,11 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env, * should be actually ds/cs at fpu exception time, but * that information is not available in 64bit mode. 
*/ - asm("mov %%ds,%0" : "=r" (env->fos)); - asm("mov %%cs,%0" : "=r" (env->fcs)); + asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos)); + asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs)); } else { struct pt_regs *regs = task_pt_regs(tsk); + env->fos = 0xffff0000 | tsk->thread.ds; env->fcs = regs->cs; } @@ -309,9 +308,10 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, init_fpu(target); - if (!cpu_has_fxsr) + if (!cpu_has_fxsr) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.i387.fsave, 0, -1); + } if (kbuf && pos == 0 && count == sizeof(env)) { convert_from_fxsr(kbuf, target); @@ -319,6 +319,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, } convert_from_fxsr(&env, target); + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1); } @@ -335,9 +336,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, init_fpu(target); set_stopped_child_used_math(target); - if (!cpu_has_fxsr) + if (!cpu_has_fxsr) { return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.i387.fsave, 0, -1); + } if (pos > 0 || count < sizeof(env)) convert_from_fxsr(&env, target); @@ -392,28 +394,28 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf) { if (!used_math()) return 0; - - /* This will cause a "finit" to be triggered by the next + /* + * This will cause a "finit" to be triggered by the next * attempted FPU operation by the 'current' process. */ clear_used_math(); - if (HAVE_HWFP) { - if (cpu_has_fxsr) { - return save_i387_fxsave(buf); - } else { - return save_i387_fsave(buf); - } - } else { + if (!HAVE_HWFP) { return fpregs_soft_get(current, NULL, 0, sizeof(struct user_i387_ia32_struct), NULL, buf) ? -1 : 1; } + + if (cpu_has_fxsr) + return save_i387_fxsave(buf); + else + return save_i387_fsave(buf); } static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) { struct task_struct *tsk = current; + clear_fpu(tsk); return __copy_from_user(&tsk->thread.i387.fsave, buf, sizeof(struct i387_fsave_struct)); @@ -421,9 +423,10 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) { - int err; struct task_struct *tsk = current; struct user_i387_ia32_struct env; + int err; + clear_fpu(tsk); err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0], sizeof(struct i387_fxsave_struct)); @@ -432,6 +435,7 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) if (err || __copy_from_user(&env, buf, sizeof(env))) return 1; convert_to_fxsr(tsk, &env); + return 0; } @@ -440,17 +444,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) int err; if (HAVE_HWFP) { - if (cpu_has_fxsr) { + if (cpu_has_fxsr) err = restore_i387_fxsave(buf); - } else { + else err = restore_i387_fsave(buf); - } } else { err = fpregs_soft_set(current, NULL, 0, sizeof(struct user_i387_ia32_struct), NULL, buf) != 0; } set_used_math(); + return err; } @@ -463,8 +467,8 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) */ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu) { - int fpvalid; struct task_struct *tsk = current; + int fpvalid; fpvalid = !!used_math(); if (fpvalid) -- cgit v1.2.1 From 16281a998d7340a5bee4078f6f9a26c47208eb86 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 4 Mar 2008 16:46:27 -0800 Subject: x86: include/asm-x86/mutex_32.h - use angle brackets for include Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_32.h | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index bbeefb96ddfd..9a6b3da25914 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h @@ -9,7 +9,7 @@ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H -#include "asm/alternative.h" +#include <asm/alternative.h> /** * __mutex_fastpath_lock - try to take the lock by moving the count -- cgit v1.2.1 From c1db29dbc761e9a464b417df7d4dbbae7df81f4c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 4 Mar 2008 16:47:00 -0800 Subject: x86: arch/x86/kernel/cpu/feature_names.c - use angle brackets for include Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/feature_names.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c index ee975ac6bbcb..e43ad4ad4cba 100644 --- a/arch/x86/kernel/cpu/feature_names.c +++ b/arch/x86/kernel/cpu/feature_names.c @@ -4,7 +4,7 @@ * This file must not contain any executable code. */ -#include "asm/cpufeature.h" +#include <asm/cpufeature.h> /* * These flag bits must match the definitions in <asm/cpufeature.h>. -- cgit v1.2.1 From a7113170214b569d24e413326a56c4cc5cc1a152 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 10:24:04 +0100 Subject: x86: remove DEBUG_SIG Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal_32.c | 12 ------------ arch/x86/kernel/signal_64.c | 25 ------------------------- 2 files changed, 37 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index add9c6e9c44d..f4ec6a092951 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -26,8 +26,6 @@ #include #include "sigframe.h" -#define DEBUG_SIG 0 - #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ @@ -412,11 +410,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); -#if DEBUG_SIG - printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", - current->comm, current->pid, frame, regs->ip, frame->pretcode); -#endif - return 0; give_sigsegv: @@ -505,11 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); -#if DEBUG_SIG - printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", - current->comm, current->pid, frame, regs->ip, frame->pretcode); -#endif - return 0; give_sigsegv: diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 043294582f41..827179c5b32a 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -28,8 +28,6 @@ #include #include "sigframe.h" -#define DEBUG_SIG 0 - #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ @@ -142,10 +140,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; -#if DEBUG_SIG - printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax); -#endif - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) goto badframe; @@ -274,10 +268,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (err) goto give_sigsegv; -#if DEBUG_SIG - printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); -#endif - /* Set up registers for signal handler */ regs->di = sig; /* In case the signal handler was declared without
prototypes */ @@ -302,10 +292,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); -#if DEBUG_SIG - printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", - current->comm, current->pid, frame, regs->ip, frame->pretcode); -#endif return 0; @@ -353,12 +339,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, { int ret; -#if DEBUG_SIG - printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n", - current->pid, sig, - regs->ip, regs->sp, regs); -#endif - /* Are we from a system call? */ if (current_syscall(regs) >= 0) { /* If so, check system call restarting.. */ @@ -491,11 +471,6 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { -#if DEBUG_SIG - printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n", - thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current)); -#endif - /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) { regs->flags |= X86_EFLAGS_TF; -- cgit v1.2.1 From 7e907f48980d6668f99206ba0dded40dca2d086f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 10:33:08 +0100 Subject: x86: clean up arch/x86/kernel/signal_32.c Before: total: 21 errors, 6 warnings, 665 lines checked After: total: 0 errors, 3 warnings, 685 lines checked No code changed: arch/x86/kernel/signal_32.o: text data bss dec hex filename 5333 0 4 5337 14d9 signal_32.o.before 5333 0 4 5337 14d9 signal_32.o.after md5: c279e98012a2808e90cfa2a7787e42a4 signal_32.o.before.asm c279e98012a2808e90cfa2a7787e42a4 signal_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal_32.c | 180 +++++++++++++++++++++++++------------------- 1 file changed, 101 insertions(+), 79 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index f4ec6a092951..3da3ffa39e9a 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -4,26 +4,29 @@ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes */ +#include -#include -#include -#include +#include +#include +#include #include +#include #include +#include +#include #include +#include #include -#include -#include -#include -#include -#include #include -#include +#include +#include + #include #include #include #include #include + #include "sigframe.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -55,10 +58,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask) current->state = TASK_INTERRUPTIBLE; schedule(); set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; } -asmlinkage int +asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { @@ -67,10 +71,12 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, if (act) { old_sigset_t mask; + if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); @@ -83,6 +89,7 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) return -EFAULT; + 
__put_user(old_ka.sa.sa_flags, &oact->sa_flags); __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } @@ -90,10 +97,12 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, return ret; } -asmlinkage int -sys_sigaltstack(unsigned long bx) +asmlinkage int sys_sigaltstack(unsigned long bx) { - /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */ + /* + * This is needed to make gcc realize it doesn't own the + * "struct pt_regs" + */ struct pt_regs *regs = (struct pt_regs *)&bx; const stack_t __user *uss = (const stack_t __user *)bx; stack_t __user *uoss = (stack_t __user *)regs->cx; @@ -129,7 +138,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, #define GET_SEG(seg) \ { unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ - loadsegment(seg,tmp); } + loadsegment(seg, tmp); } GET_SEG(gs); COPY_SEG(fs); @@ -139,16 +148,19 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, COPY(dx); COPY(cx); COPY(ip); COPY_SEG_STRICT(cs); COPY_SEG_STRICT(ss); - + { unsigned int tmpflags; + err |= __get_user(tmpflags, &sc->flags); - regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); + regs->flags = (regs->flags & ~FIX_EFLAGS) | + (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ } { - struct _fpstate __user * buf; + struct _fpstate __user *buf; + err |= __get_user(buf, &sc->fpstate); if (buf) { if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) @@ -156,6 +168,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, err |= restore_i387(buf); } else { struct task_struct *me = current; + if (used_math()) { clear_fpu(me); clear_used_math(); @@ -172,15 +185,17 @@ badframe: asmlinkage unsigned long sys_sigreturn(unsigned long __unused) { - struct pt_regs *regs = (struct pt_regs *) &__unused; - struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8); - sigset_t set; + struct sigframe __user *frame; + struct pt_regs *regs; unsigned long ax; + sigset_t set; + + regs = (struct pt_regs *) &__unused; + frame = (struct sigframe __user *)(regs->sp - 8); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; - if (__get_user(set.sig[0], &frame->sc.oldmask) - || (_NSIG_WORDS > 1 + if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))) goto badframe; @@ -190,7 +205,7 @@ asmlinkage unsigned long sys_sigreturn(unsigned long __unused) current->blocked = set; recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - + if (restore_sigcontext(regs, &frame->sc, &ax)) goto badframe; return ax; @@ -207,8 +222,9 @@ badframe: } force_sig(SIGSEGV, current); + return 0; -} +} asmlinkage int sys_rt_sigreturn(unsigned long __unused) { @@ -228,7 +244,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) current->blocked = set; recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; @@ -240,12 +256,11 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) badframe: force_sig(SIGSEGV, current); return 0; -} +} /* * Set up a signal frame. */ - static int setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, struct pt_regs *regs, unsigned long mask) @@ -276,9 +291,9 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, tmp = save_i387(fpstate); if (tmp < 0) - err = 1; + err = 1; else - err |= __put_user(tmp ? 
fpstate : NULL, &sc->fpstate); + err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); @@ -291,7 +306,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, * Determine which stack to use.. */ static inline void __user * -get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; @@ -309,32 +324,37 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; - } - - /* This is the legacy signal stack switching. */ - else if ((regs->ss & 0xffff) != __USER_DS && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) { - sp = (unsigned long) ka->sa.sa_restorer; + } else { + /* This is the legacy signal stack switching. */ + if ((regs->ss & 0xffff) != __USER_DS && + !(ka->sa.sa_flags & SA_RESTORER) && + ka->sa.sa_restorer) + sp = (unsigned long) ka->sa.sa_restorer; } sp -= frame_size; - /* Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. */ + /* + * Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. + */ sp = ((sp + 4) & -16ul) - 4; + return (void __user *) sp; } -/* These symbols are defined with the addresses in the vsyscall page. - See vsyscall-sigreturn.S. */ +/* + * These symbols are defined with the addresses in the vsyscall page. + * See vsyscall-sigreturn.S. + */ extern void __user __kernel_sigreturn; extern void __user __kernel_rt_sigreturn; -static int setup_frame(int sig, struct k_sigaction *ka, - sigset_t *set, struct pt_regs * regs) +static int +setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, + struct pt_regs *regs) { - void __user *restorer; struct sigframe __user *frame; + void __user *restorer; int err = 0; int usig; @@ -373,9 +393,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up to return from userspace. */ err |= __put_user(restorer, &frame->pretcode); - + /* - * This is popl %eax ; movl $,%eax ; int $0x80 + * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80 * * WE DO NOT USE IT ANY MORE! 
It's only left here for historical * reasons and because gdb uses it as a signature to notice @@ -389,9 +409,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, goto give_sigsegv; /* Set up registers for signal handler */ - regs->sp = (unsigned long) frame; - regs->ip = (unsigned long) ka->sa.sa_handler; - regs->ax = (unsigned long) sig; + regs->sp = (unsigned long)frame; + regs->ip = (unsigned long)ka->sa.sa_handler; + regs->ax = (unsigned long)sig; regs->dx = 0; regs->cx = 0; @@ -418,10 +438,10 @@ give_sigsegv: } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs * regs) + sigset_t *set, struct pt_regs *regs) { - void __user *restorer; struct rt_sigframe __user *frame; + void __user *restorer; int err = 0; int usig; @@ -451,7 +471,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, - regs, set->sig[0]); + regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; @@ -461,9 +481,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); - + /* - * This is movl $,%ax ; int $0x80 + * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice @@ -477,11 +497,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, goto give_sigsegv; /* Set up registers for signal handler */ - regs->sp = (unsigned long) frame; - regs->ip = (unsigned long) ka->sa.sa_handler; - regs->ax = (unsigned long) usig; - regs->dx = (unsigned long) &frame->info; - regs->cx = (unsigned long) &frame->uc; + regs->sp = (unsigned long)frame; + regs->ip = (unsigned long)ka->sa.sa_handler; + regs->ax = (unsigned long)usig; + regs->dx = (unsigned long)&frame->info; + regs->cx = (unsigned long)&frame->uc; regs->ds = __USER_DS; regs->es = __USER_DS; @@ -506,9 +526,8 @@ give_sigsegv: } /* - * OK, we're invoking a handler - */ - + * OK, we're invoking a handler: + */ static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) @@ -551,16 +570,17 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, else ret = setup_frame(sig, ka, oldset, regs); - if (ret == 0) { - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked,sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - } + if (ret) + return ret; - return ret; + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(¤t->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + return 0; } /* @@ -592,7 +612,8 @@ static void do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* Re-enable any watchpoints before delivering the + /* + * Re-enable any watchpoints before delivering the * signal to user space. The processor register will * have been cleared if the watchpoint triggered * inside the kernel. 
@@ -600,16 +621,17 @@ static void do_signal(struct pt_regs *regs) if (current->thread.debugreg7) set_debugreg(current->thread.debugreg7, 7); - /* Whee! Actually deliver the signal. */ + /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* a signal was successfully delivered; the saved + /* + * a signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ + * clear the TIF_RESTORE_SIGMASK flag + */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } - return; } @@ -645,8 +667,8 @@ static void do_signal(struct pt_regs *regs) * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ -void do_notify_resume(struct pt_regs *regs, void *unused, - __u32 thread_info_flags) +void +do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) { @@ -660,6 +682,6 @@ void do_notify_resume(struct pt_regs *regs, void *unused, if (thread_info_flags & _TIF_HRTICK_RESCHED) hrtick_resched(); - + clear_thread_flag(TIF_IRET); } -- cgit v1.2.1 From eee6dd15723639f9270e4c561a0c82e8e18bd587 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 10:39:07 +0100 Subject: x86: move extern declaration to vdso.h Before: total: 0 errors, 3 warnings, 685 lines checked After: total: 0 errors, 1 warnings, 678 lines checked No code changed: arch/x86/kernel/signal_32.o: text data bss dec hex filename 5333 0 4 5337 14d9 signal_32.o.before 5333 0 4 5337 14d9 signal_32.o.after md5: c279e98012a2808e90cfa2a7787e42a4 signal_32.o.before.asm c279e98012a2808e90cfa2a7787e42a4 signal_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal_32.c | 7 ------- include/asm-x86/vdso.h | 7 +++++++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 3da3ffa39e9a..ba168e5743be 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -342,13 +342,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) return (void __user *) sp; } -/* - * These symbols are defined with the addresses in the vsyscall page. - * See vsyscall-sigreturn.S. - */ -extern void __user __kernel_sigreturn; -extern void __user __kernel_rt_sigreturn; - static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 629bcb6e8e45..9bb86899abf0 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h @@ -25,4 +25,11 @@ extern const char VDSO32_PRELINK[]; (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) #endif +/* + * These symbols are defined with the addresses in the vsyscall page. + * See vsyscall-sigreturn.S. 
+ */ +extern void __user __kernel_sigreturn; +extern void __user __kernel_rt_sigreturn; + #endif /* asm-x86/vdso.h */ -- cgit v1.2.1 From 97b44ae6cd8117212d41bedc433b5571ee3b79d9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 10:43:17 +0100 Subject: x86: add KERN_INFO to show_unhandled_signals printout Before: total: 0 errors, 1 warnings, 678 lines checked After: total: 0 errors, 0 warnings, 678 lines checked No code changed: arch/x86/kernel/signal_32.o: text data bss dec hex filename 5333 0 4 5337 14d9 signal_32.o.before 5336 0 4 5340 14dc signal_32.o.after md5: c279e98012a2808e90cfa2a7787e42a4 signal_32.o.before.asm c279e98012a2808e90cfa2a7787e42a4 signal_32.o.after.asm Signed-off-by: Ingo Molnar --- arch/x86/kernel/signal_32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index ba168e5743be..aa1b6a0a22e4 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -212,13 +212,13 @@ asmlinkage unsigned long sys_sigreturn(unsigned long __unused) badframe: if (show_unhandled_signals && printk_ratelimit()) { - printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx" - " sp:%lx oeax:%lx", + printk(KERN_INFO "%s%s[%d] bad frame in sigreturn frame:" + "%p ip:%lx sp:%lx oeax:%lx", task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, current->comm, task_pid_nr(current), frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(" in ", regs->ip); - printk("\n"); + printk(KERN_CONT "\n"); } force_sig(SIGSEGV, current); -- cgit v1.2.1 From a7062211865efb53cda253d6e33a106f0fe20ebe Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 6 Mar 2008 01:11:11 -0800 Subject: x86: fix amd_detect_cmp for systems with apicid lifting with apicid lifting, the boot cpu apicid will be 4 and we got: CPU: L1 I Cache: 64K (64 bytes/line), D cache 64K (64 bytes/line) CPU: L2 Cache: 512K (64 bytes/line) CPU 0/4 -> Node 0 CPU: Physical Processor ID: 1 CPU: Processor Core ID: 0 so offset the apicid back before the 'bits' shift that computes phys_proc_id; then we get the correct socket ID. also remove the cpu_data(0) reference: cpu_data(0) is only ready after smp_prepare_cpus does the assignment from boot_cpu_data to current_cpu_data aka cpu_data(0), and check_bugs()==>identify_cpu(&boot_cpu_data) runs well before smp_prepare_cpus. So just use boot_cpu_id instead. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index d65b73e63384..f303c70dd688 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -564,7 +564,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) /* Low order bits define the core id (index of core in socket) */ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1); /* Convert the APIC ID into the socket ID */ - c->phys_proc_id = phys_pkg_id(bits); + c->phys_proc_id = (c->apicid - boot_cpu_id) >> bits; #ifdef CONFIG_NUMA node = c->phys_proc_id; @@ -581,7 +581,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) If that doesn't result in a usable node fall back to the path for the previous case.
*/ - int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits); + int ht_nodeid = apicid - boot_cpu_id; if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE) -- cgit v1.2.1 From 282bfe21cf0e2af9eac052c89bcc0a5ace80352f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 6 Mar 2008 01:13:34 -0800 Subject: x86: show apicid for cpu in proc Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 15043a335ef1..e8e58c09625d 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -59,6 +59,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, cpus_weight(per_cpu(cpu_core_map, cpu))); seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + seq_printf(m, "apicid\t\t: %d\n", c->apicid); } #endif } -- cgit v1.2.1 From 77bf90ed66116a1fc0e2f0554ecac75a54290cc0 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Mon, 3 Mar 2008 11:37:23 -0800 Subject: x86: replace remaining __FUNCTION__ occurrences __FUNCTION__ is gcc-specific, use __func__ Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 8 ++++---- arch/x86/kernel/irq_32.c | 2 +- arch/x86/kernel/srat_32.c | 10 +++++----- arch/x86/kernel/summit_32.c | 12 ++++++------ arch/x86/lguest/boot.c | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 5fed98ca0e1f..e2d30b8e08a2 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -205,7 +205,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) struct alt_instr *a; char insnbuf[MAX_PATCH_LEN]; - DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); + DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); for (a = start; a < end; a++) { u8 *instr = a->instr; BUG_ON(a->replacementlen > a->instrlen); @@ -217,7 +217,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); DPRINTK("%s: vsyscall fixup: %p => %p\n", - __FUNCTION__, a->instr, instr); + __func__, a->instr, instr); } #endif memcpy(insnbuf, a->replacement, a->replacementlen); @@ -307,7 +307,7 @@ void alternatives_smp_module_add(struct module *mod, char *name, smp->text = text; smp->text_end = text_end; DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", - __FUNCTION__, smp->locks, smp->locks_end, + __func__, smp->locks, smp->locks_end, smp->text, smp->text_end, smp->name); spin_lock_irqsave(&smp_alt, flags); @@ -332,7 +332,7 @@ void alternatives_smp_module_del(struct module *mod) continue; list_del(&item->next); spin_unlock_irqrestore(&smp_alt, flags); - DPRINTK("%s: %s\n", __FUNCTION__, item->name); + DPRINTK("%s: %s\n", __func__, item->name); kfree(item); return; } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index cef054b09d27..6ea67b76a214 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -79,7 +79,7 @@ unsigned int do_IRQ(struct pt_regs *regs) if (unlikely((unsigned)irq >= NR_IRQS)) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", - __FUNCTION__, irq); + __func__, irq); BUG(); } diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c index b72e61359c36..70e4a374b4e8 100644 --- 
a/arch/x86/kernel/srat_32.c +++ b/arch/x86/kernel/srat_32.c @@ -277,14 +277,14 @@ int __init get_memcfg_from_srat(void) rsdp_address = acpi_os_get_root_pointer(); if (!rsdp_address) { printk("%s: System description tables not found\n", - __FUNCTION__); + __func__); goto out_err; } - printk("%s: assigning address to rsdp\n", __FUNCTION__); + printk("%s: assigning address to rsdp\n", __func__); rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address; if (!rsdp) { - printk("%s: Didn't find ACPI root!\n", __FUNCTION__); + printk("%s: Didn't find ACPI root!\n", __func__); goto out_err; } @@ -292,7 +292,7 @@ int __init get_memcfg_from_srat(void) rsdp->oem_id); if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) { - printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); + printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __func__); goto out_err; } @@ -302,7 +302,7 @@ int __init get_memcfg_from_srat(void) if (!rsdt) { printk(KERN_WARNING "%s: ACPI: Invalid root system description tables (RSDT)\n", - __FUNCTION__); + __func__); goto out_err; } diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index c7b579db843d..30f04c3e68e4 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -47,7 +47,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) } } if (i == rio_table_hdr->num_rio_dev) { - printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __FUNCTION__); + printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); return last_bus; } @@ -58,7 +58,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) } } if (i == rio_table_hdr->num_scal_dev) { - printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __FUNCTION__); + printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); return last_bus; } @@ -88,7 +88,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) num_buses = 9; break; default: - printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __FUNCTION__); + printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); return last_bus; } @@ -103,13 +103,13 @@ static int __init build_detail_arrays(void) int i, scal_detail_size, rio_detail_size; if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { - printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __FUNCTION__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); + printk(KERN_WARNING "%s: MAX_NUMNODES too low! 
Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); return 0; } switch (rio_table_hdr->version) { default: - printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __FUNCTION__, rio_table_hdr->version); + printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); return 0; case 2: scal_detail_size = 11; @@ -154,7 +154,7 @@ void __init setup_summit(void) offset = *((unsigned short *)(ptr + offset)); } if (!rio_table_hdr) { - printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __FUNCTION__); + printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); return; } diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 3335b4595efd..af65b2da3ba0 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -661,7 +661,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, if (delta < LG_CLOCK_MIN_DELTA) { if (printk_ratelimit()) printk(KERN_DEBUG "%s: small delta %lu ns\n", - __FUNCTION__, delta); + __func__, delta); return -ETIME; } -- cgit v1.2.1 From e587cadd8f47e202a30712e2906a65a0606d5865 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Thu, 6 Mar 2008 08:48:49 -0500 Subject: x86: enhance DEBUG_RODATA support - alternatives Fix a memcpy that should be a text_poke (in apply_alternatives). Use kernel_wp_save/kernel_wp_restore in text_poke to support DEBUG_RODATA correctly and so the CPU HOTPLUG special case can be removed. Add text_poke_early, for alternatives and paravirt boot-time and module load time patching. Changelog: - Fix text_set and text_poke alignment check (mixed up bitwise and and or) - Remove text_set - Export add_nops, so it can be used by others. - Document text_poke_early. - Remove clflush, since it breaks some VIA architectures and is not strictly necessary. - Add kerneldoc to text_poke and text_poke_early. - Create a second vmap instead of using the WP bit to support Xen and VMI. - Move local_irq disable within text_poke and text_poke_early to be able to be sleepable in these functions. Signed-off-by: Mathieu Desnoyers CC: Andi Kleen CC: pageexec@freemail.hu CC: H. Peter Anvin CC: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 88 ++++++++++++++++++++++++++++++++----------- include/asm-x86/alternative.h | 23 ++++++++++- 2 files changed, 87 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index e2d30b8e08a2..0c92ad4d257a 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #define MAX_PATCH_LEN (255-1) @@ -177,7 +179,7 @@ static const unsigned char*const * find_nop_table(void) #endif /* CONFIG_X86_64 */ /* Use this to add nops to a buffer, then text_poke the whole buffer. 
*/ -static void add_nops(void *insns, unsigned int len) +void add_nops(void *insns, unsigned int len) { const unsigned char *const *noptable = find_nop_table(); @@ -190,6 +192,7 @@ static void add_nops(void *insns, unsigned int len) len -= noplen; } } +EXPORT_SYMBOL_GPL(add_nops); extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern u8 *__smp_locks[], *__smp_locks_end[]; @@ -223,7 +226,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) memcpy(insnbuf, a->replacement, a->replacementlen); add_nops(insnbuf + a->replacementlen, a->instrlen - a->replacementlen); - text_poke(instr, insnbuf, a->instrlen); + text_poke_early(instr, insnbuf, a->instrlen); } } @@ -284,7 +287,6 @@ void alternatives_smp_module_add(struct module *mod, char *name, void *text, void *text_end) { struct smp_alt_module *smp; - unsigned long flags; if (noreplace_smp) return; @@ -310,39 +312,37 @@ void alternatives_smp_module_add(struct module *mod, char *name, __func__, smp->locks, smp->locks_end, smp->text, smp->text_end, smp->name); - spin_lock_irqsave(&smp_alt, flags); + spin_lock(&smp_alt); list_add_tail(&smp->next, &smp_alt_modules); if (boot_cpu_has(X86_FEATURE_UP)) alternatives_smp_unlock(smp->locks, smp->locks_end, smp->text, smp->text_end); - spin_unlock_irqrestore(&smp_alt, flags); + spin_unlock(&smp_alt); } void alternatives_smp_module_del(struct module *mod) { struct smp_alt_module *item; - unsigned long flags; if (smp_alt_once || noreplace_smp) return; - spin_lock_irqsave(&smp_alt, flags); + spin_lock(&smp_alt); list_for_each_entry(item, &smp_alt_modules, next) { if (mod != item->mod) continue; list_del(&item->next); - spin_unlock_irqrestore(&smp_alt, flags); + spin_unlock(&smp_alt); DPRINTK("%s: %s\n", __func__, item->name); kfree(item); return; } - spin_unlock_irqrestore(&smp_alt, flags); + spin_unlock(&smp_alt); } void alternatives_smp_switch(int smp) { struct smp_alt_module *mod; - unsigned long flags; #ifdef CONFIG_LOCKDEP /* @@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp) return; BUG_ON(!smp && (num_online_cpus() > 1)); - spin_lock_irqsave(&smp_alt, flags); + spin_lock(&smp_alt); /* * Avoid unnecessary switches because it forces JIT based VMs to @@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp) mod->text, mod->text_end); } smp_mode = smp; - spin_unlock_irqrestore(&smp_alt, flags); + spin_unlock(&smp_alt); } #endif @@ -411,7 +411,7 @@ void apply_paravirt(struct paravirt_patch_site *start, /* Pad the rest with nops */ add_nops(insnbuf + used, p->len - used); - text_poke(p->instr, insnbuf, p->len); + text_poke_early(p->instr, insnbuf, p->len); } } extern struct paravirt_patch_site __start_parainstructions[], @@ -420,8 +420,6 @@ extern struct paravirt_patch_site __start_parainstructions[], void __init alternative_instructions(void) { - unsigned long flags; - /* The patching is not fully atomic, so try to avoid local interruptions that might execute the to be patched code. Other CPUs are not running. 
*/ @@ -430,7 +428,6 @@ void __init alternative_instructions(void) stop_mce(); #endif - local_irq_save(flags); apply_alternatives(__alt_instructions, __alt_instructions_end); /* switch to patch-once-at-boottime-only mode and free the @@ -462,7 +459,6 @@ void __init alternative_instructions(void) } #endif apply_paravirt(__parainstructions, __parainstructions_end); - local_irq_restore(flags); if (smp_alt_once) free_init_pages("SMP alternatives", @@ -475,18 +471,64 @@ void __init alternative_instructions(void) #endif } -/* - * Warning: +/** + * text_poke_early - Update instructions on a live kernel at boot time + * @addr: address to modify + * @opcode: source of the copy + * @len: length to copy + * * When you use this code to patch more than one byte of an instruction * you need to make sure that other CPUs cannot execute this code in parallel. - * Also no thread must be currently preempted in the middle of these instructions. - * And on the local CPU you need to be protected again NMI or MCE handlers - * seeing an inconsistent instruction while you patch. + * Also no thread must be currently preempted in the middle of these + * instructions. And on the local CPU you need to be protected again NMI or MCE + * handlers seeing an inconsistent instruction while you patch. */ -void __kprobes text_poke(void *addr, unsigned char *opcode, int len) +void *text_poke_early(void *addr, const void *opcode, size_t len) { + unsigned long flags; + local_irq_save(flags); memcpy(addr, opcode, len); + local_irq_restore(flags); + sync_core(); + /* Could also do a CLFLUSH here to speed up CPU recovery; but + that causes hangs on some VIA CPUs. */ + return addr; +} + +/** + * text_poke - Update instructions on a live kernel + * @addr: address to modify + * @opcode: source of the copy + * @len: length to copy + * + * Only atomic text poke/set should be allowed when not doing early patching. + * It means the size must be writable atomically and the address must be aligned + * in a way that permits an atomic write. It also makes sure we fit on a single + * page. + */ +void *__kprobes text_poke(void *addr, const void *opcode, size_t len) +{ + unsigned long flags; + char *vaddr; + int nr_pages = 2; + + BUG_ON(len > sizeof(long)); + BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1)) + - ((long)addr & ~(sizeof(long) - 1))); + { + struct page *pages[2] = { virt_to_page(addr), + virt_to_page(addr + PAGE_SIZE) }; + if (!pages[1]) + nr_pages = 1; + vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); + BUG_ON(!vaddr); + local_irq_save(flags); + memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); + local_irq_restore(flags); + vunmap(vaddr); + } sync_core(); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. */ + return addr; } diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d8bacf3c4b08..d26416b5722c 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h @@ -156,6 +156,27 @@ apply_paravirt(struct paravirt_patch_site *start, #define __parainstructions_end NULL #endif -extern void text_poke(void *addr, unsigned char *opcode, int len); +extern void add_nops(void *insns, unsigned int len); + +/* + * Clear and restore the kernel write-protection flag on the local CPU. + * Allows the kernel to edit read-only pages. + * Side-effect: any interrupt handler running between save and restore will have + * the ability to write to read-only pages. 
+ * + * Warning: + * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and + * no thread can be preempted in the instructions being modified (no iret to an + * invalid instruction possible) or if the instructions are changed from a + * consistent state to another consistent state atomically. + * More care must be taken when modifying code in the SMP case because of + * Intel's errata. + * On the local CPU you need to be protected again NMI or MCE handlers seeing an + * inconsistent instruction while you patch. + * The _early version expects the memory to already be RW. + */ + +extern void *text_poke(void *addr, const void *opcode, size_t len); +extern void *text_poke_early(void *addr, const void *opcode, size_t len); #endif /* _ASM_X86_ALTERNATIVE_H */ -- cgit v1.2.1 From 4e4eee0e0139811b36a07854dcfa9746bc8b16d3 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 2 Feb 2008 15:42:20 -0500 Subject: x86: enhance DEBUG_RODATA support for hotplug and kprobes Standardize DEBUG_RODATA, removing special cases for hotplug and kprobes. Signed-off-by: Mathieu Desnoyers Cc: Andi Kleen Cc: pageexec@freemail.hu Cc: akpm@linux-foundation.org CC: Andi Kleen CC: pageexec@freemail.hu CC: H. Peter Anvin Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/mm/init_32.c | 24 ++++++++---------------- arch/x86/mm/init_64.c | 20 ++------------------ 2 files changed, 10 insertions(+), 34 deletions(-) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index ee1091a46964..00168e65688a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -723,25 +723,17 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; -#ifndef CONFIG_KPROBES -#ifdef CONFIG_HOTPLUG_CPU - /* It must still be possible to apply SMP alternatives. */ - if (num_possible_cpus() <= 1) -#endif - { - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", - size >> 10); + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + printk(KERN_INFO "Write protecting the kernel text: %luk\n", + size >> 10); #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); - set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); + printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", + start, start+size); + set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); - printk(KERN_INFO "Testing CPA: write protecting again\n"); - set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); -#endif - } + printk(KERN_INFO "Testing CPA: write protecting again\n"); + set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); #endif start += size; size = (unsigned long)__end_rodata - start; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ae66d8d4c58d..4cb6c7f80f07 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -635,24 +635,7 @@ EXPORT_SYMBOL_GPL(rodata_test_data); void mark_rodata_ro(void) { - unsigned long start = (unsigned long)_stext, end; - -#ifdef CONFIG_HOTPLUG_CPU - /* It must still be possible to apply SMP alternatives. 
*/ - if (num_possible_cpus() > 1) - start = (unsigned long)_etext; -#endif - -#ifdef CONFIG_KPROBES - start = (unsigned long)__start_rodata; -#endif - - end = (unsigned long)__end_rodata; - start = (start + PAGE_SIZE - 1) & PAGE_MASK; - end &= PAGE_MASK; - if (end <= start) - return; - + unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata); printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); @@ -675,6 +658,7 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif } + #endif #ifdef CONFIG_BLK_DEV_INITRD -- cgit v1.2.1 From 459cce726730ca0ac93701e53aa1d0d055ce9e90 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 Mar 2008 18:38:52 +0100 Subject: x86: remove mach_reboot.h all reboot details are handled in reboot.c and quirks are handled via reboot_fixups_32.c. Signed-off-by: Ingo Molnar --- include/asm-x86/mach-default/mach_reboot.h | 61 ------------------------------ 1 file changed, 61 deletions(-) delete mode 100644 include/asm-x86/mach-default/mach_reboot.h diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h deleted file mode 100644 index 6adee6a97dec..000000000000 --- a/include/asm-x86/mach-default/mach_reboot.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * arch/i386/mach-generic/mach_reboot.h - * - * Machine specific reboot functions for generic. - * Split out from reboot.c by Osamu Tomita - */ -#ifndef _MACH_REBOOT_H -#define _MACH_REBOOT_H - -static inline void kb_wait(void) -{ - int i; - - for (i = 0; i < 0x10000; i++) - if ((inb_p(0x64) & 0x02) == 0) - break; -} - -static inline void mach_reboot(void) -{ - int i; - - /* old method, works on most machines */ - for (i = 0; i < 10; i++) { - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } - - /* New method: sets the "System flag" which, when set, indicates - * successful completion of the keyboard controller self-test (Basic - * Assurance Test, BAT). This is needed for some machines with no - * keyboard plugged in. This read-modify-write sequence sets only the - * system flag - */ - for (i = 0; i < 10; i++) { - int cmd; - - outb(0x20, 0x64); /* read Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - cmd = inb(0x60); - udelay(50); - kb_wait(); - udelay(50); - outb(0x60, 0x64); /* write Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ - udelay(50); - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } -} - -#endif /* !_MACH_REBOOT_H */ -- cgit v1.2.1 From 01aaea1afbcdb7c49fe4a567ebe3e295db9f720d Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 6 Mar 2008 13:46:39 -0800 Subject: x86: introduce initial apicid store initial_apicid from early identify. it could be different from phys_proc_id later. also print it out in /proc/cpuinfo.
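for reference, the initial apicid is simply bits 31-24 of EBX from CPUID leaf 1; a minimal freestanding sketch of that read (hypothetical helper name, not this patch's code):

#include <stdint.h>

/* CPUID leaf 1 reports the initial APIC ID in EBX[31:24]. */
static inline uint32_t read_initial_apicid(void)
{
	uint32_t eax = 1, ebx = 0, ecx, edx;

	__asm__ volatile("cpuid"
			 : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
	return (ebx >> 24) & 0xff;
}

phys_proc_id starts out equal to this value but may be rewritten by later fixups (e.g. apicid lifting), which is why the raw value is stored separately.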
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 13 ++++++------- arch/x86/kernel/cpu/proc.c | 3 +++ arch/x86/kernel/setup_64.c | 11 ++++++----- include/asm-x86/processor.h | 1 + 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 57a46c36fa23..0dd87b8d6707 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -369,10 +369,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; c->x86_mask = tfms & 15; + c->initial_apicid = (ebx >> 24) & 0xFF; #ifdef CONFIG_X86_HT - c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); + c->apicid = phys_pkg_id(c->initial_apicid, 0); + c->phys_proc_id = c->initial_apicid; #else - c->apicid = (ebx >> 24) & 0xFF; + c->apicid = c->initial_apicid; #endif if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; @@ -395,9 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) init_scattered_cpuid_features(c); } -#ifdef CONFIG_X86_HT - c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; -#endif } static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) @@ -554,7 +553,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) } index_msb = get_count_order(smp_num_siblings); - c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); + c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id); @@ -565,7 +564,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) core_bits = get_count_order(c->x86_max_cores); - c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & + c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); if (c->x86_max_cores > 1) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e8e58c09625d..0978a4a39418 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -19,6 +19,8 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, cpus_weight(per_cpu(cpu_core_map, cpu))); seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + seq_printf(m, "apicid\t\t: %d\n", c->apicid); + seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); } #endif } @@ -60,6 +62,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); seq_printf(m, "apicid\t\t: %d\n", c->apicid); + seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); } #endif } diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index f303c70dd688..13fe525bf065 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -562,9 +562,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) bits = c->x86_coreid_bits; /* Low order bits define the core id (index of core in socket) */ - c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1); - /* Convert the APIC ID into the socket ID */ - c->phys_proc_id = (c->apicid - boot_cpu_id) >> bits; + c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); + /* Convert the initial APIC ID into the socket ID */ + c->phys_proc_id = c->initial_apicid >> bits; #ifdef CONFIG_NUMA node = c->phys_proc_id; @@ -581,7 +581,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) If that doesn't result in a usable node fall 
back to the path for the previous case. */ - int ht_nodeid = apicid - boot_cpu_id; + int ht_nodeid = c->initial_apicid; if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE) @@ -936,8 +936,9 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) c->x86 = 4; } + c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; #ifdef CONFIG_SMP - c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; + c->phys_proc_id = c->initial_apicid; #endif /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 1f9501a38493..d590da88c0df 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -101,6 +101,7 @@ struct cpuinfo_x86 { /* cpuid returned max cores value: */ u16 x86_max_cores; u16 apicid; + u16 initial_apicid; u16 x86_clflush_size; #ifdef CONFIG_SMP /* number of cores as seen by the OS: */ -- cgit v1.2.1 From 9a79cf9c1aa671325fa5ba37576c2cee23823d04 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 7 Mar 2008 19:17:55 -0800 Subject: x86: sort address_markers for dump_pagetables otherwise Vmemmap and High Kernel Mapping string is not showing up. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/mm/dump_pagetables.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 6d840338f808..6791b8334bc6 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -44,10 +44,10 @@ static struct addr_marker address_markers[] = { { 0x8000000000000000UL, "Kernel Space" }, { 0xffff810000000000UL, "Low Kernel Mapping" }, { VMALLOC_START, "vmalloc() Area" }, - { MODULES_VADDR, "Modules" }, - { MODULES_END, "End Modules" }, { VMEMMAP_START, "Vmemmap" }, { __START_KERNEL_map, "High Kernel Mapping" }, + { MODULES_VADDR, "Modules" }, + { MODULES_END, "End Modules" }, #else { PAGE_OFFSET, "Kernel Mapping" }, { 0/* VMALLOC_START */, "vmalloc() Area" }, -- cgit v1.2.1 From 537d916066f66de18dbca79adf82933cd12d2a36 Mon Sep 17 00:00:00 2001 From: Paolo Ciarrocchi Date: Fri, 7 Mar 2008 19:26:26 +0100 Subject: x86: coding style fixes to arch/x86/kernel/setup_32.c Fix: ERROR: do not initialise externals to 0 or NULL Signed-off-by: Paolo Ciarrocchi Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index fd639d9f79b6..d4ad6e8ae886 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -228,7 +228,7 @@ static inline void copy_edd(void) } #endif -int __initdata user_defined_memmap = 0; +int __initdata user_defined_memmap; /* * "mem=nopentium" disables the 4MB page tables. 
-- cgit v1.2.1 From e40c0fe6b0b5dd16aec3c0dad311d36b19d78fd9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 9 Mar 2008 12:35:00 -0700 Subject: x86: cleanup duplicate includes Signed-off-by: Joe Perches arch/x86/kernel/reboot.c | 1 - include/asm-x86/elf.h | 5 ++--- include/asm-x86/posix_types.h | 8 +------- include/asm-x86/processor.h | 3 +-- include/asm-x86/unistd.h | 8 +------- 5 files changed, 5 insertions(+), 20 deletions(-) Signed-off-by: Ingo Molnar --- arch/x86/kernel/reboot.c | 1 - include/asm-x86/elf.h | 5 ++--- include/asm-x86/posix_types.h | 8 +------- include/asm-x86/processor.h | 3 +-- include/asm-x86/unistd.h | 8 +------- 5 files changed, 5 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 484c4a80d38a..66cd4afc1e57 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -1,5 +1,4 @@ #include -#include #include #include #include diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index fb62f9941e38..77325646e2f4 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h @@ -82,8 +82,9 @@ extern unsigned int vdso_enabled; #define elf_check_arch_ia32(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) -#ifdef CONFIG_X86_32 #include + +#ifdef CONFIG_X86_32 #include /* for savesegment */ #include @@ -135,8 +136,6 @@ extern unsigned int vdso_enabled; #else /* CONFIG_X86_32 */ -#include - /* * This is used to ensure we don't load something for the wrong architecture. */ diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h index bb7133dc155d..fe312a5ba204 100644 --- a/include/asm-x86/posix_types.h +++ b/include/asm-x86/posix_types.h @@ -1,11 +1,5 @@ #ifdef __KERNEL__ -# ifdef CONFIG_X86_32 -# include "posix_types_32.h" -# else -# include "posix_types_64.h" -# endif -#else -# ifdef __i386__ +# if defined(CONFIG_X86_32) || defined(__i386__) # include "posix_types_32.h" # else # include "posix_types_64.h" diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index d590da88c0df..cc0268395ea2 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -3,8 +3,7 @@ #include -/* migration helpers, for KVM - will be removed in 2.6.25: */ -#include +/* migration helper, for KVM - will be removed in 2.6.25: */ #define Xgt_desc_struct desc_ptr /* Forward declaration, a strange C thing */ diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h index 2a58ed3e51d8..effc7ad8e12f 100644 --- a/include/asm-x86/unistd.h +++ b/include/asm-x86/unistd.h @@ -1,11 +1,5 @@ #ifdef __KERNEL__ -# ifdef CONFIG_X86_32 -# include "unistd_32.h" -# else -# include "unistd_64.h" -# endif -#else -# ifdef __i386__ +# if defined(CONFIG_X86_32) || defined(__i386__) # include "unistd_32.h" # else # include "unistd_64.h" -- cgit v1.2.1 From 86975101e46ec93be972d8f46715aa6273102545 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Fri, 7 Mar 2008 13:05:27 -0800 Subject: x86: add cpu_has_arch_perfmon adds cpu_has_arch_perfmon to test presence of architectural perfmon on Intel x86 processor Signed-off-by: Stephane Eranian Signed-off-by: Ingo Molnar --- include/asm-x86/cpufeature.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 1e3102eeb823..90feb6f2562c 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -185,6 +185,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) #define cpu_has_bts 
boot_cpu_has(X86_FEATURE_BTS) #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) +#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 -- cgit v1.2.1 From 12db648c1518b2627cc983199a97ec6f5d6a1de2 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Fri, 7 Mar 2008 13:05:39 -0800 Subject: x86: add AMD Northbridge MSR definition adds AMD Northbridge config MSR definition Signed-off-by: Stephane Eranian Signed-off-by: Robert Richter Signed-off-by: Ingo Molnar --- include/asm-x86/msr-index.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index fae118a25278..3ed97144c07b 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -83,6 +83,7 @@ /* AMD64 MSRs. Not complete. See the architecture manual for a more complete list. */ +#define MSR_AMD64_NB_CFG 0xc001001f #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 -- cgit v1.2.1 From f694010185c429629ad5a65245da08103e611852 Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Mon, 10 Mar 2008 17:44:03 +0530 Subject: x86: Don't send RESCHEDULE_VECTOR to offlined cpus In the x86 native_smp_send_reschedule() function, don't send the IPI if the cpu has gone offline already. Warn nevertheless!! Signed-off-by: Gautham R Shenoy Signed-off-by: Ingo Molnar --- arch/x86/kernel/smp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 88c1e518a203..16c52aaaca35 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -118,7 +118,10 @@ */ static void native_smp_send_reschedule(int cpu) { - WARN_ON(cpu_is_offline(cpu)); + if (unlikely(cpu_is_offline(cpu))) { + WARN_ON(1); + return; + } send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } -- cgit v1.2.1 From 5b0e508415a83989fe704b4718a1a214bc333ca7 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 10 Mar 2008 13:11:17 +0000 Subject: x86: prevent unconditional writes to DebugCtl MSR Otherwise, enabling (or better, subsequent disabling) of single stepping would cause a kernel oops on CPUs not having this MSR. The patch could have added a conditional to the MSR write in user_disable_single_step(), but centralizing the updates seems safer and (looking forward) better manageable. 
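In essence, the centralized helper added below (see the processor.h hunk) is just a guarded wrapper around the MSR write -- a simplified view:

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	/* family < 6 CPUs have no DEBUGCTL MSR; writing it would fault */
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

so every former direct wrmsrl(MSR_IA32_DEBUGCTLMSR, ...) call site funnels through a single conditional.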
Signed-off-by: Jan Beulich Cc: Markus Metzger Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 4 ++-- arch/x86/kernel/process_32.c | 4 ++-- arch/x86/kernel/process_64.c | 4 ++-- arch/x86/kernel/step.c | 2 +- include/asm-x86/processor.h | 9 +++++++++ 5 files changed, 16 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 34a591283f5d..1e3de7db9ad5 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -410,13 +410,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, static void __kprobes clear_btf(void) { if (test_thread_flag(TIF_DEBUGCTLMSR)) - wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); + update_debugctlmsr(0); } static void __kprobes restore_btf(void) { if (test_thread_flag(TIF_DEBUGCTLMSR)) - wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr); + update_debugctlmsr(current->thread.debugctlmsr); } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 9230ce060d09..ec05fb750dfc 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -564,12 +564,12 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, /* we clear debugctl to make sure DS * is not in use when we change it */ debugctl = 0; - wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); + update_debugctlmsr(0); wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); } if (next->debugctlmsr != debugctl) - wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0); + update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { set_debugreg(next->debugreg0, 0); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 1ffce14cff6e..4f40272474dd 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -563,12 +563,12 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, /* we clear debugctl to make sure DS * is not in use when we change it */ debugctl = 0; - wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); + update_debugctlmsr(0); wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); } if (next->debugctlmsr != debugctl) - wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr); + update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { loaddebug(next, 0); diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 071ff4798236..92c20fee6781 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -148,7 +148,7 @@ static void write_debugctlmsr(struct task_struct *child, unsigned long val) if (child != current) return; - wrmsrl(MSR_IA32_DEBUGCTLMSR, val); + update_debugctlmsr(val); } /* diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index cc0268395ea2..40227c9bf51b 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -741,6 +741,15 @@ extern void switch_to_new_gdt(void); extern void cpu_init(void); extern void init_gdt(int cpu); +static inline void update_debugctlmsr(unsigned long debugctlmsr) +{ +#ifndef CONFIG_X86_DEBUGCTLMSR + if (boot_cpu_data.x86 < 6) + return; +#endif + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); +} + /* * from system description table in BIOS. 
Mostly for MCA use, but * others may find it useful: -- cgit v1.2.1 From 1415d160c7f7fe8f1026735d5b6cc19aec7a367f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 10 Mar 2008 21:10:57 +0100 Subject: x86: Remove redundant display of free swap space in show_mem() Signed-off-by: Johannes Weiner Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 3 --- arch/x86/mm/pgtable_32.c | 1 - 2 files changed, 4 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4cb6c7f80f07..255e51feb157 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -89,9 +89,6 @@ void show_mem(void) printk(KERN_INFO "Mem-info:\n"); show_free_areas(); - printk(KERN_INFO "Free swap: %6ldkB\n", - nr_swap_pages << (PAGE_SHIFT-10)); - for_each_online_pgdat(pgdat) { for (i = 0; i < pgdat->node_spanned_pages; ++i) { /* diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 76e4f4d26272..3165ec0672bd 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -36,7 +36,6 @@ void show_mem(void) printk(KERN_INFO "Mem-info:\n"); show_free_areas(); - printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); for_each_online_pgdat(pgdat) { pgdat_resize_lock(pgdat, &flags); for (i = 0; i < pgdat->node_spanned_pages; ++i) { -- cgit v1.2.1 From 5d570cbbf25a62e9c077f5b351fb142dbfc67288 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Tue, 11 Mar 2008 16:43:31 +0100 Subject: x86: correct/clarify comment in nops.h describes certain multibyte instructions as "generic" nops when in fact they aren't nops at all in 64-bit mode (missing REX.W causing truncation of a register). Update the comment to state that K8 or P6 style nops should be used in 64-bit mode. This matches what the alternatives code does. Signed-off-by: Mikael Pettersson Signed-off-by: Ingo Molnar --- include/asm-x86/nops.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index b3930ae539b3..ad0bedd10b89 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h @@ -5,6 +5,8 @@ /* generic versions from gas 1: nop + the following instructions are NOT nops in 64-bit mode, + for 64-bit mode use K8 or P6 nops instead 2: movl %esi,%esi 3: leal 0x00(%esi),%esi 4: leal 0x00(,%esi,1),%esi -- cgit v1.2.1 From 6079d2d5d11122eb52721f0f3c828952a490e6c1 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 19:45:48 +0300 Subject: x86: move quad_local_to_mp_bus_id to numa.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 1 - arch/x86/pci/numa.c | 2 ++ include/asm-x86/mach-numaq/mach_mpparse.h | 2 ++ include/asm-x86/mpspec.h | 1 - 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index b2aded3fbfec..2abc9392799a 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -45,7 +45,6 @@ int apic_version [MAX_APICS]; int mp_bus_id_to_type [MAX_MP_BUSSES]; int mp_bus_id_to_node [MAX_MP_BUSSES]; int mp_bus_id_to_local [MAX_MP_BUSSES]; -int quad_local_to_mp_bus_id [NR_CPUS/4][4]; int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... 
MAX_MP_BUSSES-1] = -1 }; static int mp_current_pci_id; diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index 55270c26237c..3248d3ee388f 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -13,6 +13,8 @@ #define BUS2QUAD(global) (mp_bus_id_to_node[global]) #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) + +int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) extern void *xquad_portio; /* Where the IO area was mapped */ diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 51bbac8fc0c2..cd21f289e6b7 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -12,6 +12,8 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); } +extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; + static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 781ad74ab9e9..dbd63f8d4750 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -9,7 +9,6 @@ extern int mp_bus_id_to_type[MAX_MP_BUSSES]; extern int mp_bus_id_to_node[MAX_MP_BUSSES]; extern int mp_bus_id_to_local[MAX_MP_BUSSES]; -extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; -- cgit v1.2.1 From 8643f9d02a7bb9db74634b4c062d8e70ce7c59b9 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 19 Feb 2008 03:21:06 -0800 Subject: x86: get boot_cpu_id as early for k8_scan_nodes When acpi=off or there is no SRAT defined, apicid_to_node is got from K8 Northbridge PCI configuration space in k8_scan_nodes() in arch/x86_64/mm/k8toplogy.c. The problem is that it assumes bsp apic id is 0 at that point. For four socket system with Quad core cpus installed, all cpus apic id is offset by 4, and bsp apic id is 4. For eight socket system with dual core cpus installed, all cpus apic id is offset by 2, and bsp apic id is 2. We need get boot_cpu_id --- bsp apic id, before k8_scan_nodes by called. So create early_acpi_boot_init and early_get_smp_config for get boot_cpu_id. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 24 ++++++++++++ arch/x86/kernel/mpparse_64.c | 89 ++++++++++++++++++++++++++++++++------------ include/asm-x86/apic.h | 1 + include/asm-x86/mpspec.h | 3 ++ 4 files changed, 94 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index f6eb01d8923a..8a475793f736 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -861,6 +861,30 @@ static int __init detect_init_APIC(void) return 0; } +void __init early_init_lapic_mapping(void) +{ + unsigned long apic_phys; + + /* + * If no local APIC can be found then go out + * : it means there is no mpatable and MADT + */ + if (!smp_found_config) + return; + + apic_phys = mp_lapic_addr; + + set_fixmap_nocache(FIX_APIC_BASE, apic_phys); + apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", + APIC_BASE, apic_phys); + + /* + * Fetch the APIC ID of the BSP in case we have a + * default configuration (or the MP table is broken). 
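+ * (i.e. read the BSP's ID straight out of the local APIC's ID register)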
+ */ + boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); +} + /** * init_apic_mappings - initialize APIC mappings */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 2a1f7881c75b..529b1c22077e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -224,8 +224,7 @@ static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) /* * Read/parse the MPC */ - -static int __init smp_read_mpc(struct mp_config_table *mpc) +static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) { char str[16]; int count=sizeof(*mpc); @@ -266,6 +265,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; + if (early) + return 1; + /* * Now process the configuration blocks. */ @@ -477,27 +479,38 @@ static struct intel_mp_floating *mpf_found; /* * Scan the memory blocks for an SMP configuration block. */ -void __init get_smp_config (void) +static void __init __get_smp_config(unsigned early) { struct intel_mp_floating *mpf = mpf_found; + if (acpi_lapic && early) + return; /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical - * processors, where MPS only supports physical. - */ - if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); - return; - } - else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. + */ + if (acpi_lapic && acpi_ioapic) { + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " + "information\n"); + return; + } else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) " + "configuration information\n"); - printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); + printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", + mpf->mpf_specification); /* * Now see if we need to read further. */ if (mpf->mpf_feature1 != 0) { + if (early) { + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + return; + } printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); construct_default_ISA_mptable(mpf->mpf_feature1); @@ -508,12 +521,15 @@ void __init get_smp_config (void) * Read the physical hardware table. Anything here will * override the defaults. */ - if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); return; } + + if (early) + return; /* * If there are no explicit MP IRQ entries, then we are * broken. We set up most of the low 16 IO-APIC pins to @@ -535,13 +551,25 @@ void __init get_smp_config (void) } else BUG(); - printk(KERN_INFO "Processors: %d\n", num_processors); + if (!early) + printk(KERN_INFO "Processors: %d\n", num_processors); /* * Only use the first configuration found. 
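 *
 * A sketch of the intended call order (an illustration, assuming the
 * usual boot flow; only the early_* entry points are new here):
 *
 *	early_find_smp_config();   locate mpf, skip bootmem reservation
 *	early_get_smp_config();    just record mp_lapic_addr
 *	  ...                      (early_init_lapic_mapping() then reads
 *	                            boot_cpu_id from the mapped APIC)
 *	find_smp_config();         scan again, this time reserving pages
 *	get_smp_config();          full parse of the MP configuration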
*/ } -static int __init smp_scan_config (unsigned long base, unsigned long length) +void __init early_get_smp_config(void) +{ + __get_smp_config(1); +} + +void __init get_smp_config(void) +{ + __get_smp_config(0); +} + +static int __init smp_scan_config(unsigned long base, unsigned long length, + unsigned reserve) { extern void __bad_mpf_size(void); unsigned int *bp = phys_to_virt(base); @@ -560,10 +588,15 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) || (mpf->mpf_specification == 4)) ) { smp_found_config = 1; + mpf_found = mpf; + + if (!reserve) + return 1; + reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); if (mpf->mpf_physptr) - reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE); - mpf_found = mpf; + reserve_bootmem_generic(mpf->mpf_physptr, + PAGE_SIZE); return 1; } bp += 4; @@ -572,7 +605,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) return 0; } -void __init find_smp_config(void) +static void __init __find_smp_config(unsigned reserve) { unsigned int address; @@ -584,9 +617,9 @@ void __init find_smp_config(void) * 2) Scan the top 1K of base RAM * 3) Scan the 64K of bios */ - if (smp_scan_config(0x0,0x400) || - smp_scan_config(639*0x400,0x400) || - smp_scan_config(0xF0000,0x10000)) + if (smp_scan_config(0x0, 0x400, reserve) || + smp_scan_config(639*0x400, 0x400, reserve) || + smp_scan_config(0xF0000, 0x10000, reserve)) return; /* * If it is an SMP machine we should know now. @@ -603,13 +636,23 @@ void __init find_smp_config(void) address = *(unsigned short *)phys_to_virt(0x40E); address <<= 4; - if (smp_scan_config(address, 0x1000)) + if (smp_scan_config(address, 0x1000, reserve)) return; /* If we have come this far, we did not find an MP table */ printk(KERN_INFO "No mptable found.\n"); } +void __init early_find_smp_config(void) +{ + __find_smp_config(0); +} + +void __init find_smp_config(void) +{ + __find_smp_config(1); +} + /* -------------------------------------------------------------------------- ACPI-based MP Configuration -------------------------------------------------------------------------- */ diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index f0321a427e16..db5f7501aed6 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -129,6 +129,7 @@ extern void enable_NMI_through_LVT0(void); */ #ifdef CONFIG_X86_64 extern void setup_apic_routing(void); +extern void early_init_lapic_mapping(void); #endif extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index dbd63f8d4750..982550bef2cd 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -25,6 +25,9 @@ extern int pic_mode; extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); +extern void early_find_smp_config(void); +extern void early_get_smp_config(void); + #endif extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; -- cgit v1.2.1 From a6333c3ccbdc0ae001cff6ee1d3633942ef763f4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:09 +0300 Subject: x86: add mp_bus_not_pci bitmap to mpparse_32.c Signed-off: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++++ include/asm-x86/mpspec.h | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 2abc9392799a..c13092db578e 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -43,6 +43,7 @@ unsigned int __cpuinitdata maxcpus = 
NR_CPUS; */ int apic_version [MAX_APICS]; int mp_bus_id_to_type [MAX_MP_BUSSES]; +DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_node [MAX_MP_BUSSES]; int mp_bus_id_to_local [MAX_MP_BUSSES]; int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; @@ -240,12 +241,14 @@ static void __init MP_bus_info (struct mpc_config_bus *m) } #endif + set_bit(m->mpc_busid, mp_bus_not_pci); if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { mpc_oem_pci_bus(m, translation_table[mpc_record]); + clear_bit(m->mpc_busid, mp_bus_not_pci); mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_current_pci_id++; @@ -984,6 +987,7 @@ void __init mp_config_acpi_legacy_irqs (void) * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; + set_bit(MP_ISA_BUS, mp_bus_not_pci); Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); /* diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 982550bef2cd..75df88e0a270 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -23,13 +23,12 @@ extern int pic_mode; /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) -extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); - extern void early_find_smp_config(void); extern void early_get_smp_config(void); #endif +extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; -- cgit v1.2.1 From d27e2b8e2ac34b62aca95d3cd7efe9708b718fde Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:18 +0300 Subject: x86: use not_pci bitmap #1 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 4ca548632c8d..b5680780fa95 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -810,10 +810,7 @@ static int __init find_isa_irq_pin(int irq, int type) for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].mpc_srcbus; - if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || - mp_bus_id_to_type[lbus] == MP_BUS_EISA || - mp_bus_id_to_type[lbus] == MP_BUS_MCA - ) && + if (test_bit(lbus, mp_bus_not_pci) && (mp_irqs[i].mpc_irqtype == type) && (mp_irqs[i].mpc_srcbusirq == irq)) -- cgit v1.2.1 From 73b2961bfa003518bb9cdd3c4003fad4d474ec13 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:24 +0300 Subject: x86: use not_pci bitmap #2 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index b5680780fa95..f7f3f3144fda 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -826,10 +826,7 @@ static int __init find_isa_irq_apic(int irq, int type) for (i = 0; i < mp_irq_entries; i++) { int lbus = mp_irqs[i].mpc_srcbus; - if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || - mp_bus_id_to_type[lbus] == MP_BUS_EISA || - mp_bus_id_to_type[lbus] == MP_BUS_MCA - ) && + if (test_bit(lbus, mp_bus_not_pci) && 
(mp_irqs[i].mpc_irqtype == type) && (mp_irqs[i].mpc_srcbusirq == irq)) break; -- cgit v1.2.1 From 47cab822d6b25d8e130da00edef91ec3eef0efab Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:30 +0300 Subject: x86: use not_pci bitmap #3 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index f7f3f3144fda..efe8322edfba 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -866,7 +866,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) mp_irqs[i].mpc_dstapic == MP_APIC_ALL) break; - if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) && + if (!test_bit(lbus, mp_bus_not_pci) && !mp_irqs[i].mpc_irqtype && (bus == lbus) && (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { -- cgit v1.2.1 From 6728801dce13f83ee7e1778b137ceebab61b71c4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:36 +0300 Subject: x86: use not_pci bitmap #4 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 48 ++++++++++---------------------------------- 1 file changed, 11 insertions(+), 37 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index efe8322edfba..2a609edd59f0 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -929,19 +929,19 @@ static int EISA_ELCR(unsigned int irq) return 0; } +/* ISA interrupts are always polarity zero edge triggered, + * when listed as conforming in the MP table. */ + +#define default_ISA_trigger(idx) (0) +#define default_ISA_polarity(idx) (0) + /* EISA interrupts are always polarity zero and can be edge or level * trigger depending on the ELCR value. If an interrupt is listed as * EISA conforming in the MP table, that means its trigger type must * be read in from the ELCR */ #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) -#define default_EISA_polarity(idx) (0) - -/* ISA interrupts are always polarity zero edge triggered, - * when listed as conforming in the MP table. */ - -#define default_ISA_trigger(idx) (0) -#define default_ISA_polarity(idx) (0) +#define default_EISA_polarity(idx) default_ISA_polarity(idx) /* PCI interrupts are always polarity one level triggered, * when listed as conforming in the MP table. */ @@ -953,7 +953,7 @@ static int EISA_ELCR(unsigned int irq) * when listed as conforming in the MP table. */ #define default_MCA_trigger(idx) (1) -#define default_MCA_polarity(idx) (0) +#define default_MCA_polarity(idx) default_ISA_polarity(idx) static int MPBIOS_polarity(int idx) { @@ -967,35 +967,9 @@ static int MPBIOS_polarity(int idx) { case 0: /* conforms, ie. bus-type dependent polarity */ { - switch (mp_bus_id_to_type[bus]) - { - case MP_BUS_ISA: /* ISA pin */ - { - polarity = default_ISA_polarity(idx); - break; - } - case MP_BUS_EISA: /* EISA pin */ - { - polarity = default_EISA_polarity(idx); - break; - } - case MP_BUS_PCI: /* PCI pin */ - { - polarity = default_PCI_polarity(idx); - break; - } - case MP_BUS_MCA: /* MCA pin */ - { - polarity = default_MCA_polarity(idx); - break; - } - default: - { - printk(KERN_WARNING "broken BIOS!!\n"); - polarity = 1; - break; - } - } + polarity = test_bit(bus, mp_bus_not_pci)? 
+ default_ISA_polarity(idx): + default_PCI_polarity(idx); break; } case 1: /* high active */ -- cgit v1.2.1 From 9c0076cb34a0ea968413d9a1ccb6c7c850d3a1ee Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:43 +0300 Subject: x86: use not_pci bitmap #5 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 2a609edd59f0..e400f77eb0e0 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1010,11 +1010,14 @@ static int MPBIOS_trigger(int idx) { case 0: /* conforms, ie. bus-type dependent */ { + trigger = test_bit(bus, mp_bus_not_pci)? + default_ISA_trigger(idx): + default_PCI_trigger(idx); switch (mp_bus_id_to_type[bus]) { case MP_BUS_ISA: /* ISA pin */ { - trigger = default_ISA_trigger(idx); + /* set before the switch */ break; } case MP_BUS_EISA: /* EISA pin */ @@ -1024,7 +1027,7 @@ static int MPBIOS_trigger(int idx) } case MP_BUS_PCI: /* PCI pin */ { - trigger = default_PCI_trigger(idx); + /* set before the switch */ break; } case MP_BUS_MCA: /* MCA pin */ -- cgit v1.2.1 From 643befed1090af5f0000297ce11fa23e2777f42b Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:49 +0300 Subject: x86: use not_pci bitmap #6 Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 47 ++++++++++++++------------------------------ 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index e400f77eb0e0..97ac6104d63a 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1091,39 +1091,22 @@ static int pin_2_irq(int idx, int apic, int pin) if (mp_irqs[idx].mpc_dstirq != pin) printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); - switch (mp_bus_id_to_type[bus]) - { - case MP_BUS_ISA: /* ISA pin */ - case MP_BUS_EISA: - case MP_BUS_MCA: - { - irq = mp_irqs[idx].mpc_srcbusirq; - break; - } - case MP_BUS_PCI: /* PCI pin */ - { - /* - * PCI IRQs are mapped in order - */ - i = irq = 0; - while (i < apic) - irq += nr_ioapic_registers[i++]; - irq += pin; - - /* - * For MPS mode, so far only needed by ES7000 platform - */ - if (ioapic_renumber_irq) - irq = ioapic_renumber_irq(apic, irq); + if (test_bit(bus, mp_bus_not_pci)) + irq = mp_irqs[idx].mpc_srcbusirq; + else { + /* + * PCI IRQs are mapped in order + */ + i = irq = 0; + while (i < apic) + irq += nr_ioapic_registers[i++]; + irq += pin; - break; - } - default: - { - printk(KERN_ERR "unknown bus type %d.\n",bus); - irq = 0; - break; - } + /* + * For MPS mode, so far only needed by ES7000 platform + */ + if (ioapic_renumber_irq) + irq = ioapic_renumber_irq(apic, irq); } /* -- cgit v1.2.1 From 9e0a2de2684372f16130b753efdbf226a997efb0 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:54:56 +0300 Subject: x86: rearrange bus_type parse Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index c13092db578e..13adcc40a00e 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -242,16 +242,16 @@ static void __init MP_bus_info (struct mpc_config_bus *m) #endif set_bit(m->mpc_busid, mp_bus_not_pci); - if (strncmp(str, BUSTYPE_ISA, 
sizeof(BUSTYPE_ISA)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; - } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; - } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { + if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { mpc_oem_pci_bus(m, translation_table[mpc_record]); clear_bit(m->mpc_busid, mp_bus_not_pci); mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_current_pci_id++; + } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; + } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; } else { -- cgit v1.2.1 From c0a282c251181aa423d4831719613b8286b5b839 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 20 Mar 2008 14:55:02 +0300 Subject: x86: make mp_bus_id_to_type optional [ mingo@elte.hu: fix boot regression. ] Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 4 ++++ arch/x86/kernel/mpparse_32.c | 8 +++++++- include/asm-x86/mpspec.h | 6 +++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 97ac6104d63a..0d70acd3b134 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -915,6 +915,7 @@ void __init setup_ioapic_dest(void) } #endif +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) /* * EISA Edge/Level control register, ELCR */ @@ -928,6 +929,7 @@ static int EISA_ELCR(unsigned int irq) "Broken MPtable reports ISA irq %d\n", irq); return 0; } +#endif /* ISA interrupts are always polarity zero edge triggered, * when listed as conforming in the MP table. */ @@ -1013,6 +1015,7 @@ static int MPBIOS_trigger(int idx) trigger = test_bit(bus, mp_bus_not_pci)? default_ISA_trigger(idx): default_PCI_trigger(idx); +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) switch (mp_bus_id_to_type[bus]) { case MP_BUS_ISA: /* ISA pin */ @@ -1042,6 +1045,7 @@ static int MPBIOS_trigger(int idx) break; } } +#endif break; } case 1: /* edge */ diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 13adcc40a00e..8795122db9bc 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -42,7 +42,9 @@ unsigned int __cpuinitdata maxcpus = NR_CPUS; * MP-table. 
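  * (the mp_bus_id_to_* arrays below are all indexed by MP bus id,
  *  0 .. MAX_MP_BUSSES-1)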
*/ int apic_version [MAX_APICS]; +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) int mp_bus_id_to_type [MAX_MP_BUSSES]; +#endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_node [MAX_MP_BUSSES]; int mp_bus_id_to_local [MAX_MP_BUSSES]; @@ -245,9 +247,10 @@ static void __init MP_bus_info (struct mpc_config_bus *m) if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { mpc_oem_pci_bus(m, translation_table[mpc_record]); clear_bit(m->mpc_busid, mp_bus_not_pci); - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_current_pci_id++; +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { @@ -256,6 +259,7 @@ static void __init MP_bus_info (struct mpc_config_bus *m) mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; } else { printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); +#endif } } @@ -983,10 +987,12 @@ void __init mp_config_acpi_legacy_irqs (void) int i = 0; int ioapic = -1; +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; +#endif set_bit(MP_ISA_BUS, mp_bus_not_pci); Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 75df88e0a270..5fa8b8bb7d00 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -6,7 +6,6 @@ #ifdef CONFIG_X86_32 #include -extern int mp_bus_id_to_type[MAX_MP_BUSSES]; extern int mp_bus_id_to_node[MAX_MP_BUSSES]; extern int mp_bus_id_to_local[MAX_MP_BUSSES]; @@ -28,7 +27,12 @@ extern void early_get_smp_config(void); #endif +#if defined(CONFIG_MCA) || defined(CONFIG_EISA) +extern int mp_bus_id_to_type[MAX_MP_BUSSES]; +#endif + extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); + extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; -- cgit v1.2.1 From e129cb490e842753b43af7aae136935fc0928dc8 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 22:55:42 +0300 Subject: x86: move mp_bus_id_to_local to numa.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 1 - arch/x86/pci/numa.c | 2 ++ include/asm-x86/mach-numaq/mach_mpparse.h | 2 ++ include/asm-x86/mpspec.h | 1 - 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 8795122db9bc..ae385b427841 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -47,7 +47,6 @@ int mp_bus_id_to_type [MAX_MP_BUSSES]; #endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_node [MAX_MP_BUSSES]; -int mp_bus_id_to_local [MAX_MP_BUSSES]; int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; static int mp_current_pci_id; diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index 3248d3ee388f..e765558f7e1b 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -12,6 +12,8 @@ #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. 
*/ #define BUS2QUAD(global) (mp_bus_id_to_node[global]) + +int mp_bus_id_to_local[MAX_MP_BUSSES]; #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) int quad_local_to_mp_bus_id [NR_CPUS/4][4]; diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index cd21f289e6b7..0917b95c42ac 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -1,6 +1,8 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H +extern int mp_bus_id_to_local[MAX_MP_BUSSES]; + static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 5fa8b8bb7d00..1c52bd8711f6 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -7,7 +7,6 @@ #include extern int mp_bus_id_to_node[MAX_MP_BUSSES]; -extern int mp_bus_id_to_local[MAX_MP_BUSSES]; extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; -- cgit v1.2.1 From 037cab07e9515149fecc2274775807f06ea6b036 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Tue, 11 Mar 2008 22:55:48 +0300 Subject: x86: move mp_bus_id_to_node to numa.c Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 1 - arch/x86/kernel/summit_32.c | 2 ++ arch/x86/pci/numa.c | 1 + include/asm-x86/mach-numaq/mach_mpparse.h | 1 + include/asm-x86/mpspec.h | 2 -- 5 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index ae385b427841..1b1fd6e920e6 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -46,7 +46,6 @@ int apic_version [MAX_APICS]; int mp_bus_id_to_type [MAX_MP_BUSSES]; #endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -int mp_bus_id_to_node [MAX_MP_BUSSES]; int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; static int mp_current_pci_id; diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 30f04c3e68e4..6878a9c2df5d 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c @@ -35,6 +35,8 @@ static struct rio_table_hdr *rio_table_hdr __initdata; static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; +static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; + static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) { int twister = 0, node = 0; diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index e765558f7e1b..9e30378364ed 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -11,6 +11,7 @@ #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. 
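 * (each NUMA-Q quad gets its own 256k window of port-IO space)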
*/ +int mp_bus_id_to_node[MAX_MP_BUSSES]; #define BUS2QUAD(global) (mp_bus_id_to_node[global]) int mp_bus_id_to_local[MAX_MP_BUSSES]; diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 0917b95c42ac..254993ae04c7 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -2,6 +2,7 @@ #define __ASM_MACH_MPPARSE_H extern int mp_bus_id_to_local[MAX_MP_BUSSES]; +extern int mp_bus_id_to_node[MAX_MP_BUSSES]; static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 1c52bd8711f6..99da0a59b436 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -6,8 +6,6 @@ #ifdef CONFIG_X86_32 #include -extern int mp_bus_id_to_node[MAX_MP_BUSSES]; - extern unsigned int def_to_bigsmp; extern int apic_version[MAX_APICS]; extern u8 apicid_2_node[]; -- cgit v1.2.1 From b552da8740222c35bcd83c9be7b27185bfb6d53c Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Wed, 19 Mar 2008 15:58:11 +0100 Subject: x86 iommu: add more documentation Fix coding style in pci-dma_64.c and add stubs for documentation. I hope someone fills the rest, I understand maybe off and soft... Signed-off-by: Pavel Machek Signed-off-by: Ingo Molnar --- Documentation/kernel-parameters.txt | 13 +++++++++++++ arch/x86/kernel/pci-dma_64.c | 20 ++++++++++---------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index dafd001bf833..43c527d72f2f 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -812,6 +812,19 @@ and is between 256 and 4096 characters. It is defined in the file inttest= [IA64] + iommu= [x86] + off + force + noforce + biomerge + panic + nopanic + merge + nomerge + forcesac + soft + + intel_iommu= [DMAR] Intel IOMMU driver (DMAR) option off Disable intel iommu driver. 
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index 375cb2bc45be..ada5a0604992 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c @@ -232,32 +232,32 @@ static __init int iommu_setup(char *p) return -EINVAL; while (*p) { - if (!strncmp(p,"off",3)) + if (!strncmp(p, "off", 3)) no_iommu = 1; /* gart_parse_options has more force support */ - if (!strncmp(p,"force",5)) + if (!strncmp(p, "force", 5)) force_iommu = 1; - if (!strncmp(p,"noforce",7)) { + if (!strncmp(p, "noforce", 7)) { iommu_merge = 0; force_iommu = 0; } - if (!strncmp(p, "biomerge",8)) { + if (!strncmp(p, "biomerge", 8)) { iommu_bio_merge = 4096; iommu_merge = 1; force_iommu = 1; } - if (!strncmp(p, "panic",5)) + if (!strncmp(p, "panic", 5)) panic_on_overflow = 1; - if (!strncmp(p, "nopanic",7)) + if (!strncmp(p, "nopanic", 7)) panic_on_overflow = 0; - if (!strncmp(p, "merge",5)) { + if (!strncmp(p, "merge", 5)) { iommu_merge = 1; force_iommu = 1; } - if (!strncmp(p, "nomerge",7)) + if (!strncmp(p, "nomerge", 7)) iommu_merge = 0; - if (!strncmp(p, "forcesac",8)) + if (!strncmp(p, "forcesac", 8)) iommu_sac_force = 1; if (!strncmp(p, "allowdac", 8)) forbid_dac = 0; @@ -265,7 +265,7 @@ static __init int iommu_setup(char *p) forbid_dac = -1; #ifdef CONFIG_SWIOTLB - if (!strncmp(p, "soft",4)) + if (!strncmp(p, "soft", 4)) swiotlb = 1; #endif -- cgit v1.2.1 From 26c6b5ea5575a5a4886dc45f889e7b783641f2de Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:24:56 -0300 Subject: x86: change var types in __inquire_remote_apic change some variables' types in __inquire_remote_apic to match x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 2dd95bae2b96..bea2d328a4f0 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -368,10 +368,10 @@ static void unmap_cpu_to_logical_apicid(int cpu) static inline void __inquire_remote_apic(int apicid) { - int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; + unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; char *names[] = { "ID", "VERSION", "SPIV" }; int timeout; - unsigned long status; + u32 status; printk("Inquiring remote APIC #%d...\n", apicid); -- cgit v1.2.1 From ba10485c2f5a9482d93b095960cbe7d865d59e04 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:24:57 -0300 Subject: x86: add loglevel to printks Add loglevel facilities to printks in __inquire_remote_apic. the levels are the ones to match x86_64 ones. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index bea2d328a4f0..8676eec2d00f 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -373,17 +373,18 @@ static inline void __inquire_remote_apic(int apicid) int timeout; u32 status; - printk("Inquiring remote APIC #%d...\n", apicid); + printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); for (i = 0; i < ARRAY_SIZE(regs); i++) { - printk("... APIC #%d %s: ", apicid, names[i]); + printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); /* * Wait for idle. 
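 * (safe_apic_wait_icr_idle() spins, with a timeout, until the ICR
 *  busy bit clears; a non-zero return means a previous delivery
 *  never completed)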
*/ status = safe_apic_wait_icr_idle(); if (status) - printk("a previous APIC delivery may have failed\n"); + printk(KERN_CONT + "a previous APIC delivery may have failed\n"); apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); @@ -397,10 +398,10 @@ static inline void __inquire_remote_apic(int apicid) switch (status) { case APIC_ICR_RR_VALID: status = apic_read(APIC_RRR); - printk("%lx\n", status); + printk(KERN_CONT "%08x\n", status); break; default: - printk("failed\n"); + printk(KERN_CONT "failed\n"); } } } -- cgit v1.2.1 From 1af8a0c1b3b85b1007f49917ea6a351b7aeb7562 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:24:58 -0300 Subject: x86: use apic_*_around instead of apic_write in x86_64 This patch replaces apic_read() for apic_read_around() and apic_write for apic_write_around() in smpboot_64.c We do it to have a common usage between x86_64 and i386. In the former, it will always simply expand to apic_write Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 4f6d9768648f..57ebe6c04305 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -281,8 +281,8 @@ static void inquire_remote_apic(int apicid) printk(KERN_CONT "a previous APIC delivery may have failed\n"); - apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); - apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); + apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); timeout = 0; do { @@ -315,12 +315,12 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta /* * Turn INIT on target chip */ - apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); /* * Send IPI */ - apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT + apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); Dprintk("Waiting for send to finish...\n"); @@ -331,10 +331,10 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta Dprintk("Deasserting INIT.\n"); /* Target chip */ - apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); /* Send IPI */ - apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); + apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); Dprintk("Waiting for send to finish...\n"); send_status = safe_apic_wait_icr_idle(); @@ -353,6 +353,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta for (j = 1; j <= num_starts; j++) { Dprintk("Sending STARTUP #%d.\n",j); + apic_read_around(APIC_SPIV); apic_write(APIC_ESR, 0); apic_read(APIC_ESR); Dprintk("After apic_write.\n"); @@ -362,11 +363,11 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta */ /* Target chip */ - apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); /* Boot on the stack */ /* Kick the second */ - apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12)); + apic_write_around(APIC_ICR, APIC_DM_STARTUP | (start_rip>>12)); /* * Give the other CPU some time to accept the IPI. 
@@ -386,6 +387,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta * Due to the Pentium erratum 3AP. */ if (maxlvt > 3) { + apic_read_around(APIC_SPIV); apic_write(APIC_ESR, 0); } accept_status = (apic_read(APIC_ESR) & 0xEF); -- cgit v1.2.1 From d0173aeac4f7fa90a63319b817bd207bdc0ac87e Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:24:59 -0300 Subject: x86: use start_ipi_hook in x86_64 It is used to match i386. The definition for the non-paravirt case is moved to smp.h instead of smp_32.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 8 ++++++++ include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 4 ---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 57ebe6c04305..13ab1123d1d2 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -344,6 +344,14 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta num_starts = 2; + /* + * Paravirt / VMI wants a startup IPI hook here to set up the + * target processor state. + */ + startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, + (unsigned long) init_rsp); + + /* * Run STARTUP IPI loop. */ diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 513c8571a4a0..4dc271b43767 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -33,6 +33,9 @@ struct smp_ops { extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP +#ifndef CONFIG_PARAVIRT +#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) +#endif extern struct smp_ops smp_ops; static inline void smp_send_stop(void) diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 4fec2feb6ac8..76740def6092 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -30,10 +30,6 @@ DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(u16, x86_cpu_to_apicid); #ifdef CONFIG_SMP -#ifndef CONFIG_PARAVIRT -#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) -#endif - /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), -- cgit v1.2.1 From 82389871b89467b4478e02d13be2f776e0138b5b Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:00 -0300 Subject: x86: add an smp_apply_quirks to smpboot_32.c The split of smp_store_cpu_info in a quirks-only part will ease integration with x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 8676eec2d00f..e05006416d8c 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -78,19 +78,8 @@ static void map_cpu_to_logical_apicid(void); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -/* - * The bootstrap kernel entry code has set these up. 
Save them for - * a given CPU - */ - -void __cpuinit smp_store_cpu_info(int id) +static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) { - struct cpuinfo_x86 *c = &cpu_data(id); - - *c = boot_cpu_data; - c->cpu_index = id; - if (id!=0) - identify_secondary_cpu(c); /* * Mask B, Pentium, but not Pentium MMX */ @@ -138,6 +127,23 @@ void __cpuinit smp_store_cpu_info(int id) valid_k7: ; + +} + +/* + * The bootstrap kernel entry code has set these up. Save them for + * a given CPU + */ + +void __cpuinit smp_store_cpu_info(int id) +{ + struct cpuinfo_x86 *c = &cpu_data(id); + + *c = boot_cpu_data; + c->cpu_index = id; + if (id != 0) + identify_secondary_cpu(c); + smp_apply_quirks(c); } static atomic_t init_deasserted; -- cgit v1.2.1 From 4f3ab1959a63a2686524f17665d799fac28eb271 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:01 -0300 Subject: x86: decouple call to print_cpu_info from smp_store_cpu_info This will ease integration with i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 13ab1123d1d2..1da28c6c1f5f 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -97,7 +97,6 @@ static void __cpuinit smp_store_cpu_info(int id) *c = boot_cpu_data; c->cpu_index = id; identify_cpu(c); - print_cpu_info(c); } static inline void wait_for_init_deassert(atomic_t *deassert) @@ -568,6 +567,8 @@ do_rest: if (cpu_isset(cpu, cpu_callin_map)) { /* number CPUs logically, starting from 1 (BSP is 0) */ Dprintk("CPU has booted.\n"); + printk(KERN_INFO "CPU%d: ", cpu); + print_cpu_info(&cpu_data(cpu)); } else { boot_error = 1; if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) @@ -751,6 +752,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) */ setup_boot_clock(); + printk(KERN_INFO "CPU%d: ", 0); + print_cpu_info(&cpu_data(0)); } /* -- cgit v1.2.1 From 7a636af66404766df60acff88c475df8e8d79347 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:02 -0300 Subject: x86: provide specialized identification routines for x86_64 provide two specialized identify_secondary_cpu() and identify_boot_cpu() routines for x86_64. 
Although not strictly needed, they are functionally correct, and will ease integration with i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 13fe525bf065..990724143c43 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -1036,14 +1036,24 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) #endif select_idle_routine(c); - if (c != &boot_cpu_data) - mtrr_ap_init(); #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif } +void __cpuinit identify_boot_cpu(void) +{ + identify_cpu(&boot_cpu_data); +} + +void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) +{ + BUG_ON(c == &boot_cpu_data); + identify_cpu(c); + mtrr_ap_init(); +} + static __init int setup_noclflush(char *arg) { setup_clear_cpu_cap(X86_FEATURE_CLFLSH); -- cgit v1.2.1 From 5745abfe86841a97fbe9e3f1e4f881a01b0c5f5b Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:03 -0300 Subject: x86: use identify_boot_cpu Call this function instead of identify_cpu in bugs_64.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/bugs_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c index 8f520f93ffd4..60207e999a04 100644 --- a/arch/x86/kernel/bugs_64.c +++ b/arch/x86/kernel/bugs_64.c @@ -12,7 +12,7 @@ void __init check_bugs(void) { - identify_cpu(&boot_cpu_data); + identify_boot_cpu(); #if !defined(CONFIG_SMP) printk("CPU: "); print_cpu_info(&boot_cpu_data); -- cgit v1.2.1 From f7401f7fe653f90f8f80a241840b9b499779e87d Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:04 -0300 Subject: x86: call identify_secondary_cpu in smp_store_cpu_info Call it conditionally for secondary cpus. 
This behaviour matches i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 1da28c6c1f5f..f84e30da521a 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -96,7 +96,8 @@ static void __cpuinit smp_store_cpu_info(int id) *c = boot_cpu_data; c->cpu_index = id; - identify_cpu(c); + if (id != 0) + identify_secondary_cpu(c); } static inline void wait_for_init_deassert(atomic_t *deassert) -- cgit v1.2.1 From 1d89a7f072d4f76f0538edfb474d527066ee7838 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:05 -0300 Subject: x86: merge smp_store_cpu_info now that it is the same between arches, put it into smpboot.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 77 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 71 +--------------------------------------- arch/x86/kernel/smpboot_64.c | 15 --------- include/asm-x86/smp.h | 2 ++ include/asm-x86/smp_32.h | 2 -- 5 files changed, 80 insertions(+), 87 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b13b9d55f9ce..a157a5245923 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -45,6 +45,83 @@ unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +#ifdef CONFIG_X86_32 +/* Set if we find a B stepping CPU */ +int __cpuinitdata smp_b_stepping; +#endif + +static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_X86_32 + /* + * Mask B, Pentium, but not Pentium MMX + */ + if (c->x86_vendor == X86_VENDOR_INTEL && + c->x86 == 5 && + c->x86_mask >= 1 && c->x86_mask <= 4 && + c->x86_model <= 3) + /* + * Remember we have B step Pentia with bugs + */ + smp_b_stepping = 1; + + /* + * Certain Athlons might work (for various values of 'work') in SMP + * but they are not certified as MP capable. + */ + if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { + + if (num_possible_cpus() == 1) + goto valid_k7; + + /* Athlon 660/661 is valid. */ + if ((c->x86_model == 6) && ((c->x86_mask == 0) || + (c->x86_mask == 1))) + goto valid_k7; + + /* Duron 670 is valid */ + if ((c->x86_model == 7) && (c->x86_mask == 0)) + goto valid_k7; + + /* + * Athlon 662, Duron 671, and Athlon >model 7 have capability + * bit. It's worth noting that the A5 stepping (662) of some + * Athlon XP's have the MP bit set. + * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for + * more. + */ + if (((c->x86_model == 6) && (c->x86_mask >= 2)) || + ((c->x86_model == 7) && (c->x86_mask >= 1)) || + (c->x86_model > 7)) + if (cpu_has_mp) + goto valid_k7; + + /* If we get here, not a certified SMP capable AMD system. */ + add_taint(TAINT_UNSAFE_SMP); + } + +valid_k7: + ; +#endif +} + +/* + * The bootstrap kernel entry code has set these up. 
Save them for + * a given CPU + */ + +void __cpuinit smp_store_cpu_info(int id) +{ + struct cpuinfo_x86 *c = &cpu_data(id); + + *c = boot_cpu_data; + c->cpu_index = id; + if (id != 0) + identify_secondary_cpu(c); + smp_apply_quirks(c); +} + + void __cpuinit set_cpu_sibling_map(int cpu) { int i; diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index e05006416d8c..0bfb31e13540 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -59,8 +59,7 @@ #include #include -/* Set if we find a B stepping CPU */ -static int __cpuinitdata smp_b_stepping; +extern int smp_b_stepping; static cpumask_t smp_commenced_mask; @@ -78,74 +77,6 @@ static void map_cpu_to_logical_apicid(void); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) -{ - /* - * Mask B, Pentium, but not Pentium MMX - */ - if (c->x86_vendor == X86_VENDOR_INTEL && - c->x86 == 5 && - c->x86_mask >= 1 && c->x86_mask <= 4 && - c->x86_model <= 3) - /* - * Remember we have B step Pentia with bugs - */ - smp_b_stepping = 1; - - /* - * Certain Athlons might work (for various values of 'work') in SMP - * but they are not certified as MP capable. - */ - if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { - - if (num_possible_cpus() == 1) - goto valid_k7; - - /* Athlon 660/661 is valid. */ - if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1))) - goto valid_k7; - - /* Duron 670 is valid */ - if ((c->x86_model==7) && (c->x86_mask==0)) - goto valid_k7; - - /* - * Athlon 662, Duron 671, and Athlon >model 7 have capability bit. - * It's worth noting that the A5 stepping (662) of some Athlon XP's - * have the MP bit set. - * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more. - */ - if (((c->x86_model==6) && (c->x86_mask>=2)) || - ((c->x86_model==7) && (c->x86_mask>=1)) || - (c->x86_model> 7)) - if (cpu_has_mp) - goto valid_k7; - - /* If we get here, it's not a certified SMP capable AMD system. */ - add_taint(TAINT_UNSAFE_SMP); - } - -valid_k7: - ; - -} - -/* - * The bootstrap kernel entry code has set these up. Save them for - * a given CPU - */ - -void __cpuinit smp_store_cpu_info(int id) -{ - struct cpuinfo_x86 *c = &cpu_data(id); - - *c = boot_cpu_data; - c->cpu_index = id; - if (id != 0) - identify_secondary_cpu(c); - smp_apply_quirks(c); -} - static atomic_t init_deasserted; static void __cpuinit smp_callin(void) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index f84e30da521a..c213345ca2f5 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -85,21 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) #endif -/* - * The bootstrap kernel entry code has set these up. 
Save them for - * a given CPU - */ - -static void __cpuinit smp_store_cpu_info(int id) -{ - struct cpuinfo_x86 *c = &cpu_data(id); - - *c = boot_cpu_data; - c->cpu_index = id; - if (id != 0) - identify_secondary_cpu(c); -} - static inline void wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 4dc271b43767..b4c5143d7f8d 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -88,6 +88,8 @@ extern void prefill_possible_map(void); #define SMP_TRAMPOLINE_BASE 0x6000 extern unsigned long setup_trampoline(void); + +void smp_store_cpu_info(int id); #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 76740def6092..51624abda43a 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -42,8 +42,6 @@ DECLARE_PER_CPU(int, cpu_number); extern int safe_smp_processor_id(void); -void __cpuinit smp_store_cpu_info(int id); - /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { -- cgit v1.2.1 From 3b22ec7b13cb31e0d87fbc0aabe14caaaad309e8 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:06 -0300 Subject: x86: always enable irqs when entering idle This matches x86_64 behaviour, which is a superior one IMHO Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index ec05fb750dfc..08c41ed5e805 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -127,6 +127,7 @@ void default_idle(void) local_irq_enable(); current_thread_info()->status |= TS_POLLING; } else { + local_irq_enable(); /* loop is done by the caller */ cpu_relax(); } @@ -142,6 +143,7 @@ EXPORT_SYMBOL(default_idle); */ static void poll_idle(void) { + local_irq_enable(); cpu_relax(); } @@ -248,8 +250,11 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) __monitor((void *)¤t_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) - __mwait(ax, cx); - } + __sti_mwait(ax, cx); + else + local_irq_enable(); + } else + local_irq_enable(); } /* Default MONITOR/MWAIT with no hints, used for default C1 state */ -- cgit v1.2.1 From 3ff8171feecfcdee46be7d6e92259debe46ac87f Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:07 -0300 Subject: x86: don't call local_irq_enable before entering idle the call to idle is guaranteed to do it. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 0bfb31e13540..6c16165abf11 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -214,9 +214,6 @@ static void __cpuinit start_secondary(void *unused) unlock_ipi_call_lock(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; - /* We can take interrupts now: we're officially "up". */ - local_irq_enable(); - wmb(); cpu_idle(); } -- cgit v1.2.1 From 8f15b82e22779fd44baeb78515c9c154b407eff0 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:08 -0300 Subject: x86: move setup_secondary_clock a little bit down in the function This is done so we call setup_secondary_clock() in the same place x86_64 does. A separate patch for this is appearantly not needed. 
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 6c16165abf11..4e5416eb42b0 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -185,7 +185,6 @@ static void __cpuinit start_secondary(void *unused) */ check_tsc_sync_target(); - setup_secondary_clock(); if (nmi_watchdog == NMI_IO_APIC) { disable_8259A_irq(0); enable_NMI_through_LVT0(); @@ -214,6 +213,8 @@ static void __cpuinit start_secondary(void *unused) unlock_ipi_call_lock(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; + setup_secondary_clock(); + wmb(); cpu_idle(); } -- cgit v1.2.1 From 5733f627498121870b081b7ab73ffba01348f8cd Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:09 -0300 Subject: x86: move state update out of ipi_lock it does not need to be inside the lock. Do it the way i386 does. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index c213345ca2f5..cfcfd2c2062b 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -233,10 +233,10 @@ void __cpuinit start_secondary(void) */ spin_unlock(&vector_lock); cpu_set(smp_processor_id(), cpu_online_map); - per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; - unlock_ipi_call_lock(); + per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; + setup_secondary_clock(); cpu_idle(); -- cgit v1.2.1 From ac56ef61a1f65aaf1cb31dca2a407322c5f0a4dd Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:10 -0300 Subject: x86: provide APIC_INTEGRATED definition for x86_64 it is always integrated, so define as 1. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/apicdef.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 550af7a6f88e..b0c6b285c916 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -22,7 +22,11 @@ #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFFu) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) -#define APIC_INTEGRATED(x) ((x)&0xF0u) +#ifdef CONFIG_X86_32 +# define APIC_INTEGRATED(x) ((x)&0xF0u) +#else +# define APIC_INTEGRATED(x) (1) +#endif #define APIC_XAPIC(x) ((x) >= 0x14) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFFu -- cgit v1.2.1 From 148a30f8cf2ac72b7ea6e5c8971fad8d80ec3879 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:11 -0300 Subject: x86: use APIC_INTEGRATED tests in x86_64 This patch does not change the behaviour of x86_64, since APIC_INTEGRATED is always defined as (1), but the code now exactly matches the i386 version (well, this part of the code, at least). A minimal sketch of what the macro does on each configuration is below.
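A minimal sketch of the two definitions in action (a userspace stand-in; the macro bodies are the ones from the apicdef.h hunk above, the version value is hypothetical):

#include <stdio.h>

#ifdef CONFIG_X86_32
# define APIC_INTEGRATED(x) ((x)&0xF0u)	/* version 0x1x => integrated */
#else
# define APIC_INTEGRATED(x) (1)		/* 64-bit: always integrated */
#endif

int main(void)
{
	unsigned version = 0x14;	/* hypothetical APIC version register value */

	/* On 64-bit this folds to constant true, so the compiler can
	 * drop the external-82489DX branch entirely. */
	if (APIC_INTEGRATED(version))
		puts("integrated APIC: clear ESR, send 2 STARTUP IPIs");
	else
		puts("external 82489DX: skip ESR, send 0 STARTUP IPIs");
	return 0;
}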
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index cfcfd2c2062b..d7b59d6c6963 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -295,6 +295,15 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta unsigned long send_status, accept_status = 0; int maxlvt, num_starts, j; + /* + * Be paranoid about clearing APIC errors. + */ + if (APIC_INTEGRATED(apic_version[phys_apicid])) { + apic_read_around(APIC_SPIV); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + } + Dprintk("Asserting INIT.\n"); /* @@ -327,7 +336,10 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta mb(); atomic_set(&init_deasserted, 1); - num_starts = 2; + if (APIC_INTEGRATED(apic_version[phys_apicid])) + num_starts = 2; + else + num_starts = 0; /* * Paravirt / VMI wants a startup IPI hook here to set up the -- cgit v1.2.1 From fa8004d8e0c2ba21a44bdc95c44c6c5267a991ec Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:12 -0300 Subject: x86: add barriers statement goal is to have i386 and x86_64 closer, so we add barriers to match Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 4 ++++ arch/x86/kernel/smpboot_64.c | 1 + 2 files changed, 5 insertions(+) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 4e5416eb42b0..a232f4d1f7a5 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -180,6 +180,9 @@ static void __cpuinit start_secondary(void *unused) smp_callin(); while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) cpu_relax(); + + /* otherwise gcc will move up smp_processor_id before the cpu_init */ + barrier(); /* * Check TSC synchronization with the BP: */ @@ -432,6 +435,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) Dprintk("Waiting for send to finish...\n"); send_status = safe_apic_wait_icr_idle(); + mb(); atomic_set(&init_deasserted, 1); /* diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index d7b59d6c6963..a9cc91127b91 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -239,6 +239,7 @@ void __cpuinit start_secondary(void) setup_secondary_clock(); + wmb(); cpu_idle(); } -- cgit v1.2.1 From 4780b261e2b71ca72804b26479d794839e68d9ab Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:13 -0300 Subject: x86: isolate sanity checking Isolate all sanity checking in a smp_sanity_check() function as x86_64 does. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 57 ++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index a232f4d1f7a5..b44a743d0ea9 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -735,10 +735,6 @@ exit: } #endif -/* - * Cycle through the processors sending APIC IPIs to boot each. 
- */ - static int boot_cpu_logical_apicid; /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; @@ -746,26 +742,8 @@ void *xquad_portio; EXPORT_SYMBOL(xquad_portio); #endif -static void __init smp_boot_cpus(unsigned int max_cpus) +static int __init smp_sanity_check(unsigned max_cpus) { - int apicid, cpu, bit, kicked; - unsigned long bogosum = 0; - - /* - * Setup boot CPU information - */ - smp_store_cpu_info(0); /* Final full version of the data */ - printk("CPU%d: ", 0); - print_cpu_info(&cpu_data(0)); - - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); - boot_cpu_logical_apicid = logical_smp_processor_id(); - per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; - - current_thread_info()->cpu = 0; - - set_cpu_sibling_map(0); - /* * If we couldn't find an SMP configuration at boot time, * get out of here now! @@ -780,7 +758,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); - return; + return -1; } /* @@ -806,7 +784,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); - return; + return -1; } verify_local_APIC(); @@ -828,9 +806,36 @@ static void __init smp_boot_cpus(unsigned int max_cpus) map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); - return; + return -1; } + return 0; +} + + +/* + * Cycle through the processors sending APIC IPIs to boot each. + */ +static void __init smp_boot_cpus(unsigned int max_cpus) +{ + int apicid, cpu, bit, kicked; + unsigned long bogosum = 0; + + /* + * Setup boot CPU information + */ + smp_store_cpu_info(0); /* Final full version of the data */ + printk(KERN_INFO "CPU%d: ", 0); + print_cpu_info(&cpu_data(0)); + + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_logical_apicid = logical_smp_processor_id(); + per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; + + current_thread_info()->cpu = 0; + + set_cpu_sibling_map(0); + smp_sanity_check(max_cpus); connect_bsp_APIC(); setup_local_APIC(); map_cpu_to_logical_apicid(); -- cgit v1.2.1 From b675f11127291cdb6a090ece289e4f9a0b1d609e Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:14 -0300 Subject: x86: isolate logic to disable smp Put it in a disable_smp() function, as x86_64 does Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index b44a743d0ea9..8144aa3a1edf 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -742,6 +742,15 @@ void *xquad_portio; EXPORT_SYMBOL(xquad_portio); #endif +static void __init disable_smp(void) +{ + smpboot_clear_io_apic_irqs(); + phys_cpu_present_map = physid_mask_of_physid(0); + map_cpu_to_logical_apicid(); + cpu_set(0, per_cpu(cpu_sibling_map, 0)); + cpu_set(0, per_cpu(cpu_core_map, 0)); +} + static int __init smp_sanity_check(unsigned max_cpus) { /* @@ -750,14 +759,10 @@ static int __init smp_sanity_check(unsigned max_cpus) */ if (!smp_found_config && !acpi_lapic) { printk(KERN_NOTICE "SMP motherboard not detected.\n"); - smpboot_clear_io_apic_irqs(); - phys_cpu_present_map = physid_mask_of_physid(0); + disable_smp(); if (APIC_init_uniprocessor()) 
printk(KERN_NOTICE "Local APIC not detected." " Using dummy APIC emulation.\n"); - map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); return -1; } @@ -779,11 +784,6 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); - smpboot_clear_io_apic_irqs(); - phys_cpu_present_map = physid_mask_of_physid(0); - map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); return -1; } @@ -801,11 +801,6 @@ static int __init smp_sanity_check(unsigned max_cpus) connect_bsp_APIC(); setup_local_APIC(); } - smpboot_clear_io_apic_irqs(); - phys_cpu_present_map = physid_mask_of_physid(0); - map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); return -1; } return 0; @@ -835,7 +830,12 @@ static void __init smp_boot_cpus(unsigned int max_cpus) set_cpu_sibling_map(0); - smp_sanity_check(max_cpus); + if (smp_sanity_check(max_cpus) < 0) { + printk(KERN_INFO "SMP disabled\n"); + disable_smp(); + return; + } + connect_bsp_APIC(); setup_local_APIC(); map_cpu_to_logical_apicid(); -- cgit v1.2.1 From f915d7f46b84192a19647c8e6b111a7e518875cb Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:15 -0300 Subject: x86: do tests before do_boot_cpu in i386 Do tests before do_boot_cpu in native_cpu_up for i386. Tests are a little bit broader than originally, and are the same as x86_64. Test for smp_callin is not applicable right now and is deferred. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 8144aa3a1edf..147af81f70ea 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -711,10 +711,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu) int apicid, ret; apicid = per_cpu(x86_cpu_to_apicid, cpu); - if (apicid == BAD_APICID) { - ret = -ENODEV; - goto exit; - } info.complete = &done; info.apicid = apicid; @@ -952,10 +948,22 @@ void __init native_smp_prepare_boot_cpu(void) int __cpuinit native_cpu_up(unsigned int cpu) { + int apicid = cpu_present_to_apicid(cpu); unsigned long flags; -#ifdef CONFIG_HOTPLUG_CPU int ret = 0; + WARN_ON(irqs_disabled()); + + Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); + + if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || + !physid_isset(apicid, phys_cpu_present_map)) { + printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); + return -EINVAL; + } + +#ifdef CONFIG_HOTPLUG_CPU + /* * We do warm boot only on cpus that had booted earlier * Otherwise cold boot is all handled from smp_boot_cpus(). -- cgit v1.2.1 From f3ce4466abd6f5173db98b5cc2269c139cd1790a Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:16 -0300 Subject: x86: make __smp_prepare_cpu void We have already removed the only condition that could fail here. 
so just don't test for any return value Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 147af81f70ea..ee6f3bd70390 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -704,11 +704,11 @@ static void __cpuinit do_warm_boot_cpu(struct work_struct *work) complete(info->complete); } -static int __cpuinit __smp_prepare_cpu(int cpu) +static void __cpuinit __smp_prepare_cpu(int cpu) { DECLARE_COMPLETION_ONSTACK(done); struct warm_boot_cpu_info info; - int apicid, ret; + int apicid; apicid = per_cpu(x86_cpu_to_apicid, cpu); @@ -725,9 +725,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu) wait_for_completion(&done); zap_low_mappings(); - ret = 0; -exit: - return ret; } #endif @@ -950,7 +947,6 @@ int __cpuinit native_cpu_up(unsigned int cpu) { int apicid = cpu_present_to_apicid(cpu); unsigned long flags; - int ret = 0; WARN_ON(irqs_disabled()); @@ -971,10 +967,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) * when a cpu is taken offline from cpu_exit_clear(). */ if (!cpu_isset(cpu, cpu_callin_map)) - ret = __smp_prepare_cpu(cpu); - - if (ret) - return -EIO; + __smp_prepare_cpu(cpu); #endif /* In case one didn't come up */ -- cgit v1.2.1 From 8154fa3740d2bbc64aa46e75bcccb71dd82280d3 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:17 -0300 Subject: x86: move assignment of CPU_PREPARE before do_boot_cpu Done to match x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index ee6f3bd70390..0e86ccc90d82 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -958,6 +958,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) return -EINVAL; } + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; #ifdef CONFIG_HOTPLUG_CPU /* @@ -976,7 +977,6 @@ int __cpuinit native_cpu_up(unsigned int cpu) return -EIO; } - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; /* Unleash the CPU! */ cpu_set(cpu, smp_commenced_mask); -- cgit v1.2.1 From 7e1efc0cde2a266fc31932ea7aed4bb20f524544 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:18 -0300 Subject: x86: unify extern masks declaration take them off smp_{32,64}.h and move to smp.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 12 ++++++++++++ include/asm-x86/smp_32.h | 8 -------- include/asm-x86/smp_64.h | 11 ----------- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b4c5143d7f8d..d02e6eacee39 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -3,12 +3,24 @@ #ifndef __ASSEMBLY__ #include #include +#include extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +extern u16 x86_cpu_to_apicid_init[]; +extern u16 x86_bios_cpu_apicid_init[]; +extern void *x86_cpu_to_apicid_early_ptr; +extern void *x86_bios_cpu_apicid_early_ptr; + +DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); +DECLARE_PER_CPU(cpumask_t, cpu_core_map); +DECLARE_PER_CPU(u16, cpu_llc_id); +DECLARE_PER_CPU(u16, x86_cpu_to_apicid); +DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); + /* * Trampoline 80x86 program as an array. 
*/ diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 51624abda43a..478f5564630f 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -21,14 +21,6 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); -extern u16 __initdata x86_cpu_to_apicid_init[]; -extern void *x86_cpu_to_apicid_early_ptr; - -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(u16, x86_cpu_to_apicid); - #ifdef CONFIG_SMP /* * This function is needed by all SMP systems. It must _always_ be valid diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 394c78524331..1b3c0f1de9a9 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,17 +19,6 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -extern u16 __initdata x86_cpu_to_apicid_init[]; -extern u16 __initdata x86_bios_cpu_apicid_init[]; -extern void *x86_cpu_to_apicid_early_ptr; -extern void *x86_bios_cpu_apicid_early_ptr; - -DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); -DECLARE_PER_CPU(cpumask_t, cpu_core_map); -DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(u16, x86_cpu_to_apicid); -DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); - static inline int cpu_present_to_apicid(int mps_cpu) { if (cpu_present(mps_cpu)) -- cgit v1.2.1 From cbe879fc6c77b5751a91167654b75a39421d0f3f Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:19 -0300 Subject: x86: define bios to apicid mapping This mapping already exists in x86_64, just provide it for i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 6 ++++++ include/asm-x86/mach-bigsmp/mach_apic.h | 7 ++----- include/asm-x86/mach-es7000/mach_apic.h | 8 +++----- include/asm-x86/mach-summit/mach_apic.h | 3 +-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 0e86ccc90d82..92a5df6190b5 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -70,6 +70,12 @@ void *x86_cpu_to_apicid_early_ptr; DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); +u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata + = { [0 ... 
NR_CPUS-1] = BAD_APICID }; +void *x86_bios_cpu_apicid_early_ptr; +DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + u8 apicid_2_node[MAX_APICID]; static void map_cpu_to_logical_apicid(void); diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 6df235e8ea91..0d55b1f6d56b 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h @@ -1,10 +1,7 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) +#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) #define esr_disable (1) static inline int apic_id_registered(void) @@ -90,7 +87,7 @@ static inline int apicid_to_node(int logical_apicid) static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); return BAD_APICID; } diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index d23011fdf454..04cba9f1e375 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -1,9 +1,7 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) +#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) #define esr_disable (1) static inline int apic_id_registered(void) @@ -80,7 +78,7 @@ extern void enable_apic_mode(void); extern int apic_version [MAX_APICS]; static inline void setup_apic_routing(void) { - int apic = bios_cpu_apicid[smp_processor_id()]; + int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); @@ -102,7 +100,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) if (!mps_cpu) return boot_cpu_physical_apicid; else if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; + return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 062c97f6100b..91d7641cddc9 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@ -40,7 +40,6 @@ static inline unsigned long check_apicid_present(int bit) #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) -extern u8 bios_cpu_apicid[]; extern u8 cpu_2_logical_apicid[]; static inline void init_apic_ldr(void) @@ -110,7 +109,7 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; } -- cgit v1.2.1 From ccf82085ee32c9b171183d8042e8a6e2776ec628 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:20 -0300 Subject: x86: initialize map pointers in setup_32.c this will serve as a reference as to whether or not to use the per_cpu variables in mpparse. 
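For reference, a minimal sketch of the convention these pointers implement (userspace stand-in types; only the early-vs-late dispatch is the point, and the names merely mirror the patches above):

#include <stdio.h>

#define NR_CPUS 4
#define BAD_APICID 0xFFFFu

/* Hedged illustration: while the early pointer is non-NULL, writers use
 * the static init array; once the per-cpu areas are set up the pointer
 * is cleared and the per-cpu copies are used instead. */
static unsigned short x86_bios_cpu_apicid_init[NR_CPUS] = {
	[0 ... NR_CPUS-1] = BAD_APICID		/* gcc range initializer */
};
static void *x86_bios_cpu_apicid_early_ptr = x86_bios_cpu_apicid_init;
static unsigned short percpu_bios_apicid[NR_CPUS];	/* stand-in for per_cpu() */

static void record_bios_apicid(int cpu, unsigned short apicid)
{
	if (x86_bios_cpu_apicid_early_ptr) {	/* early in boot */
		unsigned short *map = x86_bios_cpu_apicid_early_ptr;
		map[cpu] = apicid;
	} else {				/* per-cpu areas are live */
		percpu_bios_apicid[cpu] = apicid;
	}
}

int main(void)
{
	record_bios_apicid(0, 0x10);		/* early write */
	x86_bios_cpu_apicid_early_ptr = NULL;	/* maps copied, pointer zeroed */
	record_bios_apicid(1, 0x11);		/* late write */
	printf("%#x %#x\n", x86_bios_cpu_apicid_init[0], percpu_bios_apicid[1]);
	return 0;
}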
Done the same way as x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_32.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index d4ad6e8ae886..eb97bcfe0f6f 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -722,6 +722,18 @@ char * __init __attribute__((weak)) memory_setup(void) return machine_specific_memory_setup(); } +#ifdef CONFIG_NUMA +/* + * In the golden day, when everything among i386 and x86_64 will be + * integrated, this will not live here + */ +void *x86_cpu_to_node_map_early_ptr; +int x86_cpu_to_node_map_init[NR_CPUS] = { + [0 ... NR_CPUS-1] = NUMA_NO_NODE +}; +DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE; +#endif + /* * Determine if we were loaded by an EFI loader. If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -855,6 +867,18 @@ void __init setup_arch(char **cmdline_p) io_delay_init(); +#ifdef CONFIG_X86_SMP + /* + * setup to use the early static init tables during kernel startup + * X86_SMP will exclude sub-arches that don't deal well with it. + */ + x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init; + x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init; +#ifdef CONFIG_NUMA + x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init; +#endif +#endif + #ifdef CONFIG_X86_GENERICARCH generic_apic_probe(); #endif -- cgit v1.2.1 From 04d1dd20f64f2b41baf5c01f57c574ca942ab4eb Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:21 -0300 Subject: x86: make node to apic mapping declarations unconditional Instead of declaring them inside of X86_64 ifdef, do it unconditionally Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/topology.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8af05a93f097..dada89e5b152 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -32,15 +32,15 @@ /* Mappings between logical cpu number and node number */ #ifdef CONFIG_X86_32 extern int cpu_to_node_map[]; - #else -DECLARE_PER_CPU(int, x86_cpu_to_node_map); -extern int x86_cpu_to_node_map_init[]; -extern void *x86_cpu_to_node_map_early_ptr; /* Returns the number of the current Node. 
*/ #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) #endif +DECLARE_PER_CPU(int, x86_cpu_to_node_map); +extern int x86_cpu_to_node_map_init[]; +extern void *x86_cpu_to_node_map_early_ptr; + extern cpumask_t node_to_cpumask_map[]; #define NUMA_NO_NODE (-1) -- cgit v1.2.1 From fbac7fcbadc54cc5d374873a2e60e924a056d198 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:22 -0300 Subject: x86: fix alloc_bootmem_pages_node macro missing a semicolon Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 274a59566c45..b9f5be2f603b 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -129,7 +129,7 @@ static inline int pfn_valid(int pfn) struct pglist_data __maybe_unused \ *__alloc_bootmem_node__pgdat = (pgdat); \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)) \ + __pa(MAX_DMA_ADDRESS)); \ }) #define alloc_bootmem_low_pages_node(pgdat, x) \ ({ \ -- cgit v1.2.1 From 4fe29a85642544503cf81e9cf251ef0f4e65b162 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:23 -0300 Subject: x86: use specialized routine for setup per-cpu area We use the same routine as x86_64, moved now to setup.c. Just with a few ifdefs inside. Note that this routine uses prefill_possible_map(). It has the very nice side effect of allowing hotplugging of cpus that are marked as present but disabled by acpi bios. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/setup.c | 103 +++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/setup64.c | 77 -------------------------------- arch/x86/kernel/smpboot_32.c | 2 + 5 files changed, 107 insertions(+), 79 deletions(-) create mode 100644 arch/x86/kernel/setup.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dbdd3142215c..fd27048087b8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -117,7 +117,7 @@ config ARCH_HAS_CPU_RELAX def_bool y config HAVE_SETUP_PER_CPU_AREA - def_bool X86_64 + def_bool X86_64 || (X86_SMP && !X86_VOYAGER) config ARCH_HIBERNATION_POSSIBLE def_bool y diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c436e747f502..5d33509fd1c1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -18,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp) obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o obj-y += traps_$(BITS).o irq_$(BITS).o obj-y += time_$(BITS).o ioport.o ldt.o -obj-y += setup_$(BITS).o i8259_$(BITS).o +obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c new file mode 100644 index 000000000000..1179aa06cdbf --- /dev/null +++ b/arch/x86/kernel/setup.c @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +/* + * Copy data used in early init routines from the initial arrays to the + * per cpu data areas. These arrays then become expendable and the + * *_early_ptr's are zeroed indicating that the static arrays are gone. 
+ */ +static void __init setup_per_cpu_maps(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { +#ifdef CONFIG_SMP + if (per_cpu_offset(cpu)) { +#endif + per_cpu(x86_cpu_to_apicid, cpu) = + x86_cpu_to_apicid_init[cpu]; + per_cpu(x86_bios_cpu_apicid, cpu) = + x86_bios_cpu_apicid_init[cpu]; +#ifdef CONFIG_NUMA + per_cpu(x86_cpu_to_node_map, cpu) = + x86_cpu_to_node_map_init[cpu]; +#endif +#ifdef CONFIG_SMP + } else + printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", + cpu); +#endif + } + + /* indicate the early static arrays will soon be gone */ + x86_cpu_to_apicid_early_ptr = NULL; + x86_bios_cpu_apicid_early_ptr = NULL; +#ifdef CONFIG_NUMA + x86_cpu_to_node_map_early_ptr = NULL; +#endif +} + +#ifdef CONFIG_X86_32 +/* + * Great future not-so-futuristic plan: make i386 and x86_64 do it + * the same way + */ +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); +#endif + +/* + * Great future plan: + * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. + * Always point %gs to its beginning + */ +void __init setup_per_cpu_areas(void) +{ + int i; + unsigned long size; + +#ifdef CONFIG_HOTPLUG_CPU + prefill_possible_map(); +#endif + + /* Copy section for each CPU (we discard the original) */ + size = PERCPU_ENOUGH_ROOM; + + printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", + size); + for_each_cpu_mask(i, cpu_possible_map) { + char *ptr; +#ifndef CONFIG_NEED_MULTIPLE_NODES + ptr = alloc_bootmem_pages(size); +#else + int node = early_cpu_to_node(i); + if (!node_online(node) || !NODE_DATA(node)) + ptr = alloc_bootmem_pages(size); + else + ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); +#endif + if (!ptr) + panic("Cannot allocate cpu data for CPU %d\n", i); +#ifdef CONFIG_X86_64 + cpu_pda(i)->data_offset = ptr - __per_cpu_start; +#else + __per_cpu_offset[i] = ptr - __per_cpu_start; +#endif + memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + } + + /* setup percpu data maps early */ + setup_per_cpu_maps(); +} + +#endif diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c index e24c45677094..6b4e3262e8cb 100644 --- a/arch/x86/kernel/setup64.c +++ b/arch/x86/kernel/setup64.c @@ -85,83 +85,6 @@ static int __init nonx32_setup(char *str) } __setup("noexec32=", nonx32_setup); -/* - * Copy data used in early init routines from the initial arrays to the - * per cpu data areas. These arrays then become expendable and the - * *_early_ptr's are zeroed indicating that the static arrays are gone. - */ -static void __init setup_per_cpu_maps(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { -#ifdef CONFIG_SMP - if (per_cpu_offset(cpu)) { -#endif - per_cpu(x86_cpu_to_apicid, cpu) = - x86_cpu_to_apicid_init[cpu]; - per_cpu(x86_bios_cpu_apicid, cpu) = - x86_bios_cpu_apicid_init[cpu]; -#ifdef CONFIG_NUMA - per_cpu(x86_cpu_to_node_map, cpu) = - x86_cpu_to_node_map_init[cpu]; -#endif -#ifdef CONFIG_SMP - } - else - printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", - cpu); -#endif - } - - /* indicate the early static arrays will soon be gone */ - x86_cpu_to_apicid_early_ptr = NULL; - x86_bios_cpu_apicid_early_ptr = NULL; -#ifdef CONFIG_NUMA - x86_cpu_to_node_map_early_ptr = NULL; -#endif -} - -/* - * Great future plan: - * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. 
- * Always point %gs to its beginning - */ -void __init setup_per_cpu_areas(void) -{ - int i; - unsigned long size; - -#ifdef CONFIG_HOTPLUG_CPU - prefill_possible_map(); -#endif - - /* Copy section for each CPU (we discard the original) */ - size = PERCPU_ENOUGH_ROOM; - - printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); - for_each_cpu_mask (i, cpu_possible_map) { - char *ptr; -#ifndef CONFIG_NEED_MULTIPLE_NODES - ptr = alloc_bootmem_pages(size); -#else - int node = early_cpu_to_node(i); - - if (!node_online(node) || !NODE_DATA(node)) - ptr = alloc_bootmem_pages(size); - else - ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); -#endif - if (!ptr) - panic("Cannot allocate cpu data for CPU %d\n", i); - cpu_pda(i)->data_offset = ptr - __per_cpu_start; - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); - } - - /* setup percpu data maps early */ - setup_per_cpu_maps(); -} - void pda_init(int cpu) { struct x8664_pda *pda = cpu_pda(cpu); diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 92a5df6190b5..bf5c9e9f26c1 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -665,6 +665,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) unmap_cpu_to_logical_apicid(cpu); cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ + cpu_clear(cpu, cpu_possible_map); cpucount--; } else { per_cpu(x86_cpu_to_apicid, cpu) = apicid; @@ -743,6 +744,7 @@ EXPORT_SYMBOL(xquad_portio); static void __init disable_smp(void) { + cpu_possible_map = cpumask_of_cpu(0); smpboot_clear_io_apic_irqs(); phys_cpu_present_map = physid_mask_of_physid(0); map_cpu_to_logical_apicid(); -- cgit v1.2.1 From 73bf102b1cadc53d418df02ba687769a9f916a6d Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:24 -0300 Subject: x86: fill bios cpu to apicid maps We fill the per-cpu (or array) that maps bios cpu id to apicid in mpparse_32.c, the way x86_64 does Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 1b1fd6e920e6..6ea97163701f 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -75,8 +75,6 @@ unsigned disabled_cpus __cpuinitdata; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; - /* * Intel MP BIOS table parsing routines: */ @@ -220,7 +218,14 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) def_to_bigsmp = 1; } } - bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; + /* are we being called early in kernel startup? 
*/ + if (x86_cpu_to_apicid_early_ptr) { + u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; + } else { + int cpu = num_processors - 1; + per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; + } } static void __init MP_bus_info (struct mpc_config_bus *m) -- cgit v1.2.1 From a6c422ccdb57924bd20ae408dba8e9db01d09677 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:25 -0300 Subject: x86: fill cpu to apicid and present map in mpparse This is the way x86_64 does it, and complements the already present patch that does the bios cpu to apicid mapping here Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 19 +++++++++++++++++-- arch/x86/kernel/smpboot_32.c | 24 +++++++----------------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 6ea97163701f..a0cec74b80ef 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -105,7 +105,8 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinit static void __cpuinit MP_processor_info (struct mpc_config_processor *m) { - int ver, apicid; + int ver, apicid, cpu; + cpumask_t tmp_map; physid_mask_t phys_cpu; if (!(m->mpc_cpuflag & CPU_ENABLED)) { @@ -198,6 +199,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) cpu_set(num_processors, cpu_possible_map); num_processors++; + cpus_complement(tmp_map, cpu_present_map); + cpu = first_cpu(tmp_map); + + if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) + /* + * x86_bios_cpu_apicid is required to have processors listed + * in same order as logical cpu numbers. Hence the first + * entry is BSP, and so on. + */ + cpu = 0; /* * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y @@ -220,12 +231,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) } /* are we being called early in kernel startup? */ if (x86_cpu_to_apicid_early_ptr) { + u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + + cpu_to_apicid[cpu] = m->mpc_apicid; bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; } else { - int cpu = num_processors - 1; + per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; } + cpu_set(cpu, cpu_present_map); } static void __init MP_bus_info (struct mpc_config_bus *m) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index bf5c9e9f26c1..2fea910eff43 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -525,16 +525,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) #endif /* WAKE_SECONDARY_VIA_INIT */ extern cpumask_t cpu_initialized; -static inline int alloc_cpu_id(void) -{ - cpumask_t tmp_map; - int cpu; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); - if (cpu >= NR_CPUS) - return -ENODEV; - return cpu; -} #ifdef CONFIG_HOTPLUG_CPU static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; @@ -605,7 +595,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) irq_ctx_init(cpu); - per_cpu(x86_cpu_to_apicid, cpu) = apicid; /* * This grunge runs the startup process for * the targeted processor. 
@@ -666,10 +655,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ cpu_clear(cpu, cpu_possible_map); + per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; cpucount--; - } else { - per_cpu(x86_cpu_to_apicid, cpu) = apicid; - cpu_set(cpu, cpu_present_map); } /* mark "stuck" area as not stuck */ @@ -745,6 +732,7 @@ EXPORT_SYMBOL(xquad_portio); static void __init disable_smp(void) { cpu_possible_map = cpumask_of_cpu(0); + cpu_present_map = cpumask_of_cpu(0); smpboot_clear_io_apic_irqs(); phys_cpu_present_map = physid_mask_of_physid(0); map_cpu_to_logical_apicid(); @@ -825,7 +813,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); boot_cpu_logical_apicid = logical_smp_processor_id(); - per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; current_thread_info()->cpu = 0; @@ -866,8 +853,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus) continue; if (max_cpus <= cpucount+1) continue; - - if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) + /* Utterly temporary */ + for (cpu = 0; cpu < NR_CPUS; cpu++) + if (per_cpu(x86_cpu_to_apicid, cpu) == apicid) + break; + if (do_boot_cpu(apicid, cpu)) printk("CPU #%d not responding - cannot use it.\n", apicid); else -- cgit v1.2.1 From 1161705bd66df0c80fa45e87190e456c02e6f145 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 19 Mar 2008 20:26:15 +0100 Subject: x86: fill cpu to apicid and present map in mpparse, fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index a0cec74b80ef..000b51b78fbd 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -75,6 +75,10 @@ unsigned disabled_cpus __cpuinitdata; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; +#ifndef CONFIG_SMP +DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; +#endif + /* * Intel MP BIOS table parsing routines: */ @@ -229,6 +233,7 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) def_to_bigsmp = 1; } } +#ifdef CONFIG_SMP /* are we being called early in kernel startup? */ if (x86_cpu_to_apicid_early_ptr) { u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; @@ -240,6 +245,7 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; } +#endif cpu_set(cpu, cpu_present_map); } -- cgit v1.2.1 From e1a14d0c1391627d869c0f97bb5e2382bf36d8dc Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:26 -0300 Subject: x86: get rid of cpucount weighting a map will do. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 2fea910eff43..5c4e85cceb16 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -166,8 +166,6 @@ static void __cpuinit smp_callin(void) cpu_set(cpuid, cpu_callin_map); } -static int cpucount; - /* * Activate a secondary processor. */ @@ -585,7 +583,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* start_eip had better be page-aligned! 
*/ start_eip = setup_trampoline(); - ++cpucount; alternatives_smp_switch(1); /* So we see what's up */ @@ -656,7 +653,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ cpu_clear(cpu, cpu_possible_map); per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; - cpucount--; } /* mark "stuck" area as not stuck */ @@ -672,7 +668,6 @@ void cpu_exit_clear(void) idle_task_exit(); - cpucount --; cpu_uninit(); irq_ctx_exit(cpu); @@ -795,7 +790,6 @@ static int __init smp_sanity_check(unsigned max_cpus) return 0; } - /* * Cycle through the processors sending APIC IPIs to boot each. */ @@ -851,7 +845,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) if (!check_apicid_present(bit)) continue; - if (max_cpus <= cpucount+1) + if (max_cpus <= cpus_weight(cpu_present_map)) continue; /* Utterly temporary */ for (cpu = 0; cpu < NR_CPUS; cpu++) @@ -878,7 +872,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) bogosum += cpu_data(cpu).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", - cpucount+1, + cpus_weight(cpu_present_map), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); @@ -892,7 +886,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) * approved Athlon */ if (tainted & TAINT_UNSAFE_SMP) { - if (cpucount) + if (cpus_weight(cpu_present_map)) printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n"); else tainted &= ~TAINT_UNSAFE_SMP; -- cgit v1.2.1 From 904541e2f76bc3efe4cc9978b7adb3323ea8607e Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:27 -0300 Subject: x86: allow user to impress friends. Impressing friends is a very important thing. Do it in a separate function to make it even more explicit, and ease integration. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 20 ++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 17 ++--------------- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a157a5245923..02427d1003d3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -228,6 +228,26 @@ void __init smp_alloc_memory(void) } #endif +void impress_friends(void) +{ + int cpu; + unsigned long bogosum = 0; + /* + * Allow the user to impress friends. + */ + Dprintk("Before bogomips.\n"); + for_each_possible_cpu(cpu) + if (cpu_isset(cpu, cpu_callout_map)) + bogosum += cpu_data(cpu).loops_per_jiffy; + printk(KERN_INFO + "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", + cpus_weight(cpu_present_map), + bogosum/(500000/HZ), + (bogosum/(5000/HZ))%100); + + Dprintk("Before bogocount - setting activated=1.\n"); +} + #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5c4e85cceb16..34493f8ba8ac 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -790,13 +790,13 @@ static int __init smp_sanity_check(unsigned max_cpus) return 0; } +extern void impress_friends(void); /* * Cycle through the processors sending APIC IPIs to boot each. */ static void __init smp_boot_cpus(unsigned int max_cpus) { int apicid, cpu, bit, kicked; - unsigned long bogosum = 0; /* * Setup boot CPU information @@ -863,20 +863,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) */ smpboot_restore_warm_reset_vector(); - /* - * Allow the user to impress friends. 
- */ - Dprintk("Before bogomips.\n"); - for_each_possible_cpu(cpu) - if (cpu_isset(cpu, cpu_callout_map)) - bogosum += cpu_data(cpu).loops_per_jiffy; - printk(KERN_INFO - "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", - cpus_weight(cpu_present_map), - bogosum/(500000/HZ), - (bogosum/(5000/HZ))%100); - - Dprintk("Before bogocount - setting activated=1.\n"); + impress_friends(); if (smp_b_stepping) printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); -- cgit v1.2.1 From 693d4b8a6429af7f2029df20a59e22f4d752e141 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:28 -0300 Subject: x86: do smp tainting checks in a separate function It will ease integration for x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 21 +++++++++++++++++++-- arch/x86/kernel/smpboot_32.c | 20 ++------------------ 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 02427d1003d3..ddb94ef37789 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -45,10 +45,8 @@ unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; -#ifdef CONFIG_X86_32 /* Set if we find a B stepping CPU */ int __cpuinitdata smp_b_stepping; -#endif static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) { @@ -105,6 +103,25 @@ valid_k7: #endif } +void smp_checks(void) +{ + if (smp_b_stepping) + printk(KERN_WARNING "WARNING: SMP operation may be unreliable" + "with B stepping processors.\n"); + + /* + * Don't taint if we are running SMP kernel on a single non-MP + * approved Athlon + */ + if (tainted & TAINT_UNSAFE_SMP) { + if (cpus_weight(cpu_present_map)) + printk(KERN_INFO "WARNING: This combination of AMD" + "processors is not suitable for SMP.\n"); + else + tainted &= ~TAINT_UNSAFE_SMP; + } +} + /* * The bootstrap kernel entry code has set these up. Save them for * a given CPU diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 34493f8ba8ac..361851cdaa97 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -59,8 +59,6 @@ #include #include -extern int smp_b_stepping; - static cpumask_t smp_commenced_mask; /* which logical CPU number maps to which CPU (physical APIC ID) */ @@ -791,6 +789,7 @@ static int __init smp_sanity_check(unsigned max_cpus) } extern void impress_friends(void); +extern void smp_checks(void); /* * Cycle through the processors sending APIC IPIs to boot each. */ @@ -865,22 +864,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) impress_friends(); - if (smp_b_stepping) - printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); - - /* - * Don't taint if we are running SMP kernel on a single non-MP - * approved Athlon - */ - if (tainted & TAINT_UNSAFE_SMP) { - if (cpus_weight(cpu_present_map)) - printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n"); - else - tainted &= ~TAINT_UNSAFE_SMP; - } - - Dprintk("Boot done.\n"); - + smp_checks(); /* * construct cpu_sibling_map, so that we can tell sibling CPUs * efficiently. 
-- cgit v1.2.1 From f68e00a32b4f5a2881c3a39d71cc2c22e92f1d99 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:29 -0300 Subject: x86: move impress_friends and smp_check to cpus_done the cpu count is changed accordingly: now, what matters is online cpus. Also, we add those functions for x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 4 ++-- arch/x86/kernel/smpboot_32.c | 22 ++++++++++++---------- arch/x86/kernel/smpboot_64.c | 8 ++++++++ 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ddb94ef37789..6978f1bf6533 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -114,7 +114,7 @@ void smp_checks(void) * approved Athlon */ if (tainted & TAINT_UNSAFE_SMP) { - if (cpus_weight(cpu_present_map)) + if (num_online_cpus()) printk(KERN_INFO "WARNING: This combination of AMD" "processors is not suitable for SMP.\n"); else @@ -258,7 +258,7 @@ void impress_friends(void) bogosum += cpu_data(cpu).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", - cpus_weight(cpu_present_map), + num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 361851cdaa97..1736404c3c36 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -788,8 +788,6 @@ static int __init smp_sanity_check(unsigned max_cpus) return 0; } -extern void impress_friends(void); -extern void smp_checks(void); /* * Cycle through the processors sending APIC IPIs to boot each. */ @@ -857,14 +855,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ++kicked; } - /* - * Cleanup possible dangling ends... - */ - smpboot_restore_warm_reset_vector(); - - impress_friends(); - - smp_checks(); /* * construct cpu_sibling_map, so that we can tell sibling CPUs * efficiently. @@ -959,8 +949,20 @@ int __cpuinit native_cpu_up(unsigned int cpu) return 0; } +extern void impress_friends(void); +extern void smp_checks(void); + void __init native_smp_cpus_done(unsigned int max_cpus) { + /* + * Cleanup possible dangling ends... + */ + smpboot_restore_warm_reset_vector(); + + Dprintk("Boot done.\n"); + + impress_friends(); + smp_checks(); #ifdef CONFIG_X86_IO_APIC setup_ioapic_dest(); #endif diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index a9cc91127b91..c3e770b0094b 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -824,12 +824,20 @@ int __cpuinit native_cpu_up(unsigned int cpu) return err; } +extern void impress_friends(void); +extern void smp_checks(void); + /* * Finish the SMP boot. */ void __init native_smp_cpus_done(unsigned int max_cpus) { smp_cleanup_boot(); + + Dprintk("Boot done.\n"); + + impress_friends(); + smp_checks(); setup_ioapic_dest(); check_nmi_watchdog(); } -- cgit v1.2.1 From ee09efc3e4173632471d2dff6ca42e21930c94fe Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:30 -0300 Subject: x86: add subarch support (for headers) to x86_64 this patch allows x86_64 to use subarch mach_ headers in practice, since x86_64 does not have any subarch, it will use mach_default. 
But it will allow for substantially less code duplication Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 03131ce1d526..3cff3c894cf3 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -151,7 +151,6 @@ mflags-y += -Iinclude/asm-x86/mach-default # 64 bit does not support subarch support - clear sub arch variables fcore-$(CONFIG_X86_64) := mcore-$(CONFIG_X86_64) := -mflags-$(CONFIG_X86_64) := KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) -- cgit v1.2.1 From 8d77010f8c93b4d41ffd71c7ad9d07fc1668cd5a Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:31 -0300 Subject: x86: include mach_wakecpu.h in smpboot_64 Do it and also fix conflicts, which automatically makes x86_64 look closer to i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index c3e770b0094b..c6c993f4c415 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -60,6 +60,8 @@ #include #include +#include + /* Set when the idlers are all forked */ int smp_threads_ready; @@ -85,13 +87,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) #endif -static inline void wait_for_init_deassert(atomic_t *deassert) -{ - while (!atomic_read(deassert)) - cpu_relax(); - return; -} - static atomic_t init_deasserted __cpuinitdata; /* @@ -247,7 +242,7 @@ extern volatile unsigned long init_rsp; extern void (*initial_code)(void); #ifdef APIC_DEBUG -static void inquire_remote_apic(int apicid) +static void __inquire_remote_apic(int apicid) { unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; char *names[] = { "ID", "VERSION", "SPIV" }; -- cgit v1.2.1 From eb44d0a2a9c4d64ed89044fcf1f75e6a27c42ea7 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:32 -0300 Subject: x86: include smpboot_hooks.h in smpboot_64.c We do it and also fix conflicts, which makes x86_64 automatically closer to i386 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index c6c993f4c415..b9384b3af017 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -61,6 +61,7 @@ #include #include +#include /* Set when the idlers are all forked */ int smp_threads_ready; @@ -517,14 +518,7 @@ do_rest: Dprintk("Setting warm reset code and vector.\n"); - CMOS_WRITE(0xa, 0xf); - local_flush_tlb(); - Dprintk("1.\n"); - *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4; - Dprintk("2.\n"); - *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf; - Dprintk("3.\n"); - + smpboot_setup_warm_reset_vector(start_rip); /* * Be paranoid about clearing APIC errors. */ @@ -593,23 +587,6 @@ do_rest: cycles_t cacheflush_time; unsigned long cache_decay_ticks; -/* - * Cleanup possible dangling ends... - */ -static __cpuinit void smp_cleanup_boot(void) -{ - /* - * Paranoid: Set warm reset code and vector here back - * to default values. 
- */ - CMOS_WRITE(0, 0xf); - - /* - * Reset trampoline flag - */ - *((volatile int *) phys_to_virt(0x467)) = 0; -} - /* * Fall back to non SMP mode after errors. * @@ -827,7 +804,7 @@ extern void smp_checks(void); */ void __init native_smp_cpus_done(unsigned int max_cpus) { - smp_cleanup_boot(); + smpboot_restore_warm_reset_vector(); Dprintk("Boot done.\n"); -- cgit v1.2.1 From 17c9ab1eabcc08794064a6e3232ac421664c9ce1 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:33 -0300 Subject: x86: move smp_intr_init away from smpboot_32.c We move it to apic_32.c, since it's irq related anyway, and only called from that file. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 23 +++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 21 --------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 6aa93db7faa3..c32cc0feb47b 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1317,6 +1317,29 @@ void smp_error_interrupt(struct pt_regs *regs) irq_exit(); } +#ifdef CONFIG_SMP +void __init smp_intr_init(void) +{ + /* + * IRQ0 must be given a fixed assignment and initialized, + * because it's used before the IO-APIC is set up. + */ + set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); + + /* + * The reschedule interrupt is a CPU-to-CPU reschedule-helper + * IPI, driven by wakeup. + */ + set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); + + /* IPI for invalidation */ + set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); + + /* IPI for generic function call */ + set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); +} +#endif + /* * Initialize APIC interrupts */ diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 1736404c3c36..87c9a75d929c 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -968,24 +968,3 @@ void __init native_smp_cpus_done(unsigned int max_cpus) #endif zap_low_mappings(); } - -void __init smp_intr_init(void) -{ - /* - * IRQ0 must be given a fixed assignment and initialized, - * because it's used before the IO-APIC is set up. - */ - set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); - - /* - * The reschedule interrupt is a CPU-to-CPU reschedule-helper - * IPI, driven by wakeup. 
*/ - set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); - - /* IPI for invalidation */ - set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); - - /* IPI for generic function call */ - set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); -} -- cgit v1.2.1 From 3cf19f31d967da2c1279142d4dbafe18f521a1bf Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:34 -0300 Subject: x86: don't set maps in native_smp_prepare_boot_cpu() By this time, they are already set in the init routines. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 87c9a75d929c..bfdfe3c64d06 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -889,10 +889,7 @@ void __init native_smp_prepare_boot_cpu(void) init_gdt(cpu); switch_to_new_gdt(); - cpu_set(cpu, cpu_online_map); cpu_set(cpu, cpu_callout_map); - cpu_set(cpu, cpu_present_map); - cpu_set(cpu, cpu_possible_map); __get_cpu_var(cpu_state) = CPU_ONLINE; } -- cgit v1.2.1 From e32ede19ac64b5cd896e6d28aa51d34887791ab2 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:35 -0300 Subject: x86: wipe get_nmi_reason out of nmi_64.h Use mach_traps where it is supposed to be used. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/nmi_64.c | 2 ++ arch/x86/kernel/traps_64.c | 2 ++ include/asm-x86/nmi_64.h | 2 -- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 9a4fde74bee1..11f9130ac513 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c @@ -26,6 +26,8 @@ #include #include +#include + int unknown_nmi_panic; int nmi_watchdog_enabled; int panic_on_unrecovered_nmi; diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 045466681911..33292ac814f4 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -33,6 +33,8 @@ #include #include +#include + #if defined(CONFIG_EDAC) #include #endif diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h index 2eeb74e5f3ff..94a5b19e3620 100644 --- a/include/asm-x86/nmi_64.h +++ b/include/asm-x86/nmi_64.h @@ -36,8 +36,6 @@ static inline void unset_nmi_pm_callback(struct pm_dev * dev) extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); -#define get_nmi_reason() inb(0x61) - extern int unknown_nmi_panic; extern int nmi_watchdog_enabled; -- cgit v1.2.1 From 6d60cd5359e261cad1e519e77ca733c05c2f8025 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:36 -0300 Subject: x86: unify nmi_32.h and nmi_64.h Two more files go away.
nmi_64.h and nmi_32.h gives birth to nmi.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/nmi_32.c | 2 +- include/asm-x86/nmi.h | 92 ++++++++++++++++++++++++++++++++++++++++++++++-- include/asm-x86/nmi_32.h | 61 -------------------------------- include/asm-x86/nmi_64.h | 88 --------------------------------------------- 4 files changed, 90 insertions(+), 153 deletions(-) delete mode 100644 include/asm-x86/nmi_32.h delete mode 100644 include/asm-x86/nmi_64.h diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 6a0aa7038685..167385155765 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -67,7 +67,7 @@ static __init void nmi_cpu_busy(void *data) } #endif -static int __init check_nmi_watchdog(void) +int __init check_nmi_watchdog(void) { unsigned int *prev_nmi_count; int cpu; diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 53ccac14cead..2b9419903476 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h @@ -1,5 +1,91 @@ -#ifdef CONFIG_X86_32 -# include "nmi_32.h" +#ifndef _ASM_X86_NMI_H_ +#define _ASM_X86_NMI_H_ + +#include +#include +#include + +#ifdef ARCH_HAS_NMI_WATCHDOG + +/** + * do_nmi_callback + * + * Check to see if a callback exists and execute it. Return 1 + * if the handler exists and was handled successfully. + */ +int do_nmi_callback(struct pt_regs *regs, int cpu); + +#ifdef CONFIG_PM + +/** Replace the PM callback routine for NMI. */ +struct pm_dev *set_nmi_pm_callback(pm_callback callback); + +/** Unset the PM callback routine back to the default. */ +void unset_nmi_pm_callback(struct pm_dev *dev); + #else -# include "nmi_64.h" + +static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback) +{ + return 0; +} + +static inline void unset_nmi_pm_callback(struct pm_dev *dev) +{ +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_X86_64 +extern void default_do_nmi(struct pt_regs *); +extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); +#endif + +extern int check_nmi_watchdog(void); +extern int nmi_watchdog_enabled; +extern int unknown_nmi_panic; +extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); +extern int avail_to_resrv_perfctr_nmi(unsigned int); +extern int reserve_perfctr_nmi(unsigned int); +extern void release_perfctr_nmi(unsigned int); +extern int reserve_evntsel_nmi(unsigned int); +extern void release_evntsel_nmi(unsigned int); +extern void nmi_watchdog_default(void); + +extern void setup_apic_nmi_watchdog(void *); +extern void stop_apic_nmi_watchdog(void *); +extern void disable_timer_nmi_watchdog(void); +extern void enable_timer_nmi_watchdog(void); +extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); + +extern atomic_t nmi_active; +extern unsigned int nmi_watchdog; +#define NMI_DISABLED -1 +#define NMI_NONE 0 +#define NMI_IO_APIC 1 +#define NMI_LOCAL_APIC 2 +#define NMI_INVALID 3 +#define NMI_DEFAULT NMI_DISABLED + +struct ctl_table; +struct file; +extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, + void __user *, size_t *, loff_t *); +extern int unknown_nmi_panic; + +void __trigger_all_cpu_backtrace(void); +#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() + +#endif + +void lapic_watchdog_stop(void); +int lapic_watchdog_init(unsigned nmi_hz); +int lapic_wd_event(unsigned nmi_hz); +unsigned lapic_adjust_nmi_hz(unsigned hz); +int lapic_watchdog_ok(void); +void disable_lapic_nmi_watchdog(void); +void enable_lapic_nmi_watchdog(void); +void stop_nmi(void); +void restart_nmi(void); + #endif diff --git 
a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h deleted file mode 100644 index 7206c7e8a388..000000000000 --- a/include/asm-x86/nmi_32.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include -#include - -#ifdef ARCH_HAS_NMI_WATCHDOG - -/** - * do_nmi_callback - * - * Check to see if a callback exists and execute it. Return 1 - * if the handler exists and was handled successfully. - */ -int do_nmi_callback(struct pt_regs *regs, int cpu); - -extern int nmi_watchdog_enabled; -extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); -extern int reserve_perfctr_nmi(unsigned int); -extern void release_perfctr_nmi(unsigned int); -extern int reserve_evntsel_nmi(unsigned int); -extern void release_evntsel_nmi(unsigned int); - -extern void setup_apic_nmi_watchdog (void *); -extern void stop_apic_nmi_watchdog (void *); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); - -extern atomic_t nmi_active; -extern unsigned int nmi_watchdog; -#define NMI_DISABLED -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 -#define NMI_DEFAULT NMI_DISABLED - -struct ctl_table; -struct file; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); -extern int unknown_nmi_panic; - -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() - -#endif - -void lapic_watchdog_stop(void); -int lapic_watchdog_init(unsigned nmi_hz); -int lapic_wd_event(unsigned nmi_hz); -unsigned lapic_adjust_nmi_hz(unsigned hz); -int lapic_watchdog_ok(void); -void disable_lapic_nmi_watchdog(void); -void enable_lapic_nmi_watchdog(void); -void stop_nmi(void); -void restart_nmi(void); - -#endif /* ASM_NMI_H */ diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h deleted file mode 100644 index 94a5b19e3620..000000000000 --- a/include/asm-x86/nmi_64.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include -#include - -/** - * do_nmi_callback - * - * Check to see if a callback exists and execute it. Return 1 - * if the handler exists and was handled successfully. - */ -int do_nmi_callback(struct pt_regs *regs, int cpu); - -#ifdef CONFIG_PM - -/** Replace the PM callback routine for NMI. */ -struct pm_dev * set_nmi_pm_callback(pm_callback callback); - -/** Unset the PM callback routine back to the default. 
*/ -void unset_nmi_pm_callback(struct pm_dev * dev); - -#else - -static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) -{ - return 0; -} - -static inline void unset_nmi_pm_callback(struct pm_dev * dev) -{ -} - -#endif /* CONFIG_PM */ - -extern void default_do_nmi(struct pt_regs *); -extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); - -extern int unknown_nmi_panic; -extern int nmi_watchdog_enabled; - -extern int check_nmi_watchdog(void); -extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); -extern int reserve_perfctr_nmi(unsigned int); -extern void release_perfctr_nmi(unsigned int); -extern int reserve_evntsel_nmi(unsigned int); -extern void release_evntsel_nmi(unsigned int); - -extern void setup_apic_nmi_watchdog (void *); -extern void stop_apic_nmi_watchdog (void *); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); - -extern void nmi_watchdog_default(void); - -extern atomic_t nmi_active; -extern unsigned int nmi_watchdog; -#define NMI_DISABLED -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 -#define NMI_DEFAULT NMI_DISABLED - -struct ctl_table; -struct file; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); - -extern int unknown_nmi_panic; - -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() - - -void lapic_watchdog_stop(void); -int lapic_watchdog_init(unsigned nmi_hz); -int lapic_wd_event(unsigned nmi_hz); -unsigned lapic_adjust_nmi_hz(unsigned hz); -int lapic_watchdog_ok(void); -void disable_lapic_nmi_watchdog(void); -void enable_lapic_nmi_watchdog(void); -void stop_nmi(void); -void restart_nmi(void); - -#endif /* ASM_NMI_H */ -- cgit v1.2.1 From 4626df1801dc03de42f1c155417393b91c8f5d97 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:37 -0300 Subject: x86: call check_nmi_watchdog explicitly in native_smp_cpus_done With this, remove its late_initcall marker from nmi_32.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/nmi_32.c | 2 -- arch/x86/kernel/smpboot_32.c | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 167385155765..9cfc094eddb0 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -129,8 +129,6 @@ int __init check_nmi_watchdog(void) kfree(prev_nmi_count); return 0; } -/* This needs to happen later in boot so counters are working */ -late_initcall(check_nmi_watchdog); static int __init setup_nmi_watchdog(char *str) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index bfdfe3c64d06..1f3aff4caaf7 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -963,5 +963,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus) #ifdef CONFIG_X86_IO_APIC setup_ioapic_dest(); #endif + check_nmi_watchdog(); zap_low_mappings(); } -- cgit v1.2.1 From 50e440aa5323860d9e5960143b720e461ed0c582 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:38 -0300 Subject: x86: call nmi_watchdog_default in i386 this does not exist, so it will be an empty macro Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 1 + include/asm-x86/nmi.h | 4 +++- 2 files 
changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 1f3aff4caaf7..a35055361b85 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -876,6 +876,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ void __init native_smp_prepare_cpus(unsigned int max_cpus) { + nmi_watchdog_default(); smp_commenced_mask = cpumask_of_cpu(0); cpu_callin_map = cpumask_of_cpu(0); mb(); diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 2b9419903476..1e363021e72f 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h @@ -39,6 +39,9 @@ static inline void unset_nmi_pm_callback(struct pm_dev *dev) #ifdef CONFIG_X86_64 extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); +extern void nmi_watchdog_default(void); +#else +#define nmi_watchdog_default() do {} while (0) #endif extern int check_nmi_watchdog(void); @@ -50,7 +53,6 @@ extern int reserve_perfctr_nmi(unsigned int); extern void release_perfctr_nmi(unsigned int); extern int reserve_evntsel_nmi(unsigned int); extern void release_evntsel_nmi(unsigned int); -extern void nmi_watchdog_default(void); extern void setup_apic_nmi_watchdog(void *); extern void stop_apic_nmi_watchdog(void *); -- cgit v1.2.1 From e7f8b14e028f7a2f9e5c83c17164aeeeb9c61f17 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:39 -0300 Subject: x86: don't initialize sibling and core maps during preparation it is redundant, since it is already done by set_cpu_sibling_map() Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index a35055361b85..5cae17f3eb75 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -855,18 +855,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ++kicked; } - /* - * construct cpu_sibling_map, so that we can tell sibling CPUs - * efficiently. - */ - for_each_possible_cpu(cpu) { - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); - } - - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); - smpboot_setup_io_apic(); setup_boot_clock(); -- cgit v1.2.1 From 802b8133b4f78c30a2668d142d78861e27c0c6a7 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:41 -0300 Subject: x86: schedule work only if keventd is already running Only call schedule_work if keventd is already running. 
This is already how x86_64 does it. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5cae17f3eb75..255c6f761480 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -708,8 +708,12 @@ static void __cpuinit __smp_prepare_cpu(int cpu) clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); flush_tlb_all(); - schedule_work(&info.task); - wait_for_completion(&done); + if (!keventd_up() || current_is_keventd()) + info.task.func(&info.task); + else { + schedule_work(&info.task); + wait_for_completion(&done); + } zap_low_mappings(); } -- cgit v1.2.1 From d2bcbad5f3ad38a1c09861bca7e252dde7bb8259 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:42 -0300 Subject: x86: do not zap_low_mappings in __smp_prepare_cpus It was okay when cpus were cold booted before this point. But with the new state machine, they will not have arrived at the trampoline yet. Zapping low mappings would break the boot completely once paging is enabled. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 255c6f761480..88ee65585d3f 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -195,11 +195,6 @@ static void __cpuinit start_secondary(void *unused) enable_NMI_through_LVT0(); enable_8259A_irq(0); } - /* - * low-memory mappings have been cleared, flush them from - * the local TLBs too. - */ - local_flush_tlb(); /* This must be done before setting cpu_online_map */ set_cpu_sibling_map(raw_smp_processor_id()); @@ -714,8 +709,6 @@ static void __cpuinit __smp_prepare_cpu(int cpu) schedule_work(&info.task); wait_for_completion(&done); } - - zap_low_mappings(); } -- cgit v1.2.1 From 9713277607f9eac7d655c6854dd92bc2ce1b6f02 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:43 -0300 Subject: x86: boot cpus from cpu_up, instead of prepare_cpus After all the infrastructure work, we're now prepared to boot the cpus from cpu_up, and not from prepare_cpus. So the difference between cold boot and hotplug is effectively over, and the functions are used for the purposes they're meant for.
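As a rough userspace model of the resulting control flow (illustrative names and a fake four-CPU box, not the kernel implementation; the real path goes through native_cpu_up() and do_boot_cpu()):

#include <stdio.h>

static int cpu_booted[4];

/* Stand-in for the real do_boot_cpu(): kick one AP and wait for it. */
static int do_boot_cpu(int cpu)
{
        cpu_booted[cpu] = 1;
        printf("booted CPU %d\n", cpu);
        return 0;
}

/* One entry point for cold boot and hotplug alike. */
static int cpu_up(int cpu)
{
        if (cpu_booted[cpu])
                return -1;      /* already came up earlier */
        return do_boot_cpu(cpu);
}

int main(void)
{
        int cpu;

        /* prepare_cpus() no longer kicks anything; the generic code
         * simply calls cpu_up() once per secondary CPU. */
        for (cpu = 1; cpu < 4; cpu++)
                cpu_up(cpu);
        return 0;
}

The design point is that prepare_cpus() keeps only setup work, while the per-CPU boot sits behind one entry point regardless of whether the CPU comes up at boot or is hotplugged later.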
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 48 ++------------------------------------------ 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 88ee65585d3f..978e13708ddb 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -670,6 +670,7 @@ void cpu_exit_clear(void) cpu_clear(cpu, smp_commenced_mask); unmap_cpu_to_logical_apicid(cpu); } +#endif struct warm_boot_cpu_info { struct completion *complete; @@ -710,7 +711,6 @@ static void __cpuinit __smp_prepare_cpu(int cpu) wait_for_completion(&done); } } -#endif static int boot_cpu_logical_apicid; /* Where the IO area was mapped on multiquad, always 0 otherwise */ @@ -790,8 +790,6 @@ static int __init smp_sanity_check(unsigned max_cpus) */ static void __init smp_boot_cpus(unsigned int max_cpus) { - int apicid, cpu, bit, kicked; - /* * Setup boot CPU information */ @@ -819,39 +817,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) setup_portio_remap(); - /* - * Scan the CPU present map and fire up the other CPUs via do_boot_cpu - * - * In clustered apic mode, phys_cpu_present_map is a constructed thus: - * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the - * clustered apic ID. - */ - Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map)); - - kicked = 1; - for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) { - apicid = cpu_present_to_apicid(bit); - /* - * Don't even attempt to start the boot CPU! - */ - if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID)) - continue; - - if (!check_apicid_present(bit)) - continue; - if (max_cpus <= cpus_weight(cpu_present_map)) - continue; - /* Utterly temporary */ - for (cpu = 0; cpu < NR_CPUS; cpu++) - if (per_cpu(x86_cpu_to_apicid, cpu) == apicid) - break; - if (do_boot_cpu(apicid, cpu)) - printk("CPU #%d not responding - cannot use it.\n", - apicid); - else - ++kicked; - } - smpboot_setup_io_apic(); setup_boot_clock(); @@ -895,17 +860,8 @@ int __cpuinit native_cpu_up(unsigned int cpu) } per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; -#ifdef CONFIG_HOTPLUG_CPU - /* - * We do warm boot only on cpus that had booted earlier - * Otherwise cold boot is all handled from smp_boot_cpus(). - * cpu_callin_map is set during AP kickstart process. Its reset - * when a cpu is taken offline from cpu_exit_clear(). - */ - if (!cpu_isset(cpu, cpu_callin_map)) - __smp_prepare_cpu(cpu); -#endif + __smp_prepare_cpu(cpu); /* In case one didn't come up */ if (!cpu_isset(cpu, cpu_callin_map)) { -- cgit v1.2.1 From ddd10ecfa231c88382fc2f10a3120d2ad8e92381 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:44 -0300 Subject: x86: get rid of commenced mask. As we now boot cpus from cpu_up, we don't need it. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 978e13708ddb..c30abed08923 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -59,8 +59,6 @@ #include #include -static cpumask_t smp_commenced_mask; - /* which logical CPU number maps to which CPU (physical APIC ID) */ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... 
NR_CPUS-1] = BAD_APICID }; @@ -180,8 +178,6 @@ static void __cpuinit start_secondary(void *unused) cpu_init(); preempt_disable(); smp_callin(); - while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) - cpu_relax(); /* otherwise gcc will move up smp_processor_id before the cpu_init */ barrier(); @@ -667,7 +663,6 @@ void cpu_exit_clear(void) cpu_clear(cpu, cpu_callout_map); cpu_clear(cpu, cpu_callin_map); - cpu_clear(cpu, smp_commenced_mask); unmap_cpu_to_logical_apicid(cpu); } #endif @@ -827,7 +822,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) void __init native_smp_prepare_cpus(unsigned int max_cpus) { nmi_watchdog_default(); - smp_commenced_mask = cpumask_of_cpu(0); cpu_callin_map = cpumask_of_cpu(0); mb(); smp_boot_cpus(max_cpus); @@ -869,8 +863,6 @@ int __cpuinit native_cpu_up(unsigned int cpu) return -EIO; } - /* Unleash the CPU! */ - cpu_set(cpu, smp_commenced_mask); /* * Check TSC synchronization with the AP (keep irqs disabled -- cgit v1.2.1 From 365c894c65b98da944992199ea24206f531674de Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:45 -0300 Subject: x86: use create_idle struct in do_boot_cpu Use a new worker, with help of the create_idle struct to fork the idle thread. We now have two workers, the first of them triggered by __smp_prepare_cpu. But the later is going away soon. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 86 ++++++++++++++++++++++++++++++-------------- 1 file changed, 59 insertions(+), 27 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index c30abed08923..fc1eb5255f66 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -79,6 +79,24 @@ static void map_cpu_to_logical_apicid(void); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. + */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif + static atomic_t init_deasserted; static void __cpuinit smp_callin(void) @@ -513,30 +531,21 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) extern cpumask_t cpu_initialized; -#ifdef CONFIG_HOTPLUG_CPU -static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; -static inline struct task_struct * __cpuinit alloc_idle_task(int cpu) -{ +struct create_idle { + struct work_struct work; struct task_struct *idle; + struct completion done; + int cpu; +}; - if ((idle = cpu_idle_tasks[cpu]) != NULL) { - /* initialize thread_struct. 
we really want to avoid destroy - * idle tread - */ - idle->thread.sp = (unsigned long)task_pt_regs(idle); - init_idle(idle, cpu); - return idle; - } - idle = fork_idle(cpu); +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); - if (!IS_ERR(idle)) - cpu_idle_tasks[cpu] = idle; - return idle; + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); } -#else -#define alloc_idle_task(cpu) fork_idle(cpu) -#endif - static int __cpuinit do_boot_cpu(int apicid, int cpu) /* * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad @@ -544,11 +553,15 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. */ { - struct task_struct *idle; unsigned long boot_error; int timeout; unsigned long start_eip; unsigned short nmi_high = 0, nmi_low = 0; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK(&c_idle.work, do_fork_idle); /* * Save current MTRR state in case it was changed since early boot @@ -556,19 +569,38 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) */ mtrr_save_state(); + c_idle.idle = get_idle_for_cpu(cpu); + /* * We can't use kernel_thread since we must avoid to * reschedule the child. */ - idle = alloc_idle_task(cpu); - if (IS_ERR(idle)) - panic("failed fork for CPU %d", cpu); + if (c_idle.idle) { + c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) + (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); + init_idle(c_idle.idle, cpu); + goto do_rest; + } + + if (!keventd_up() || current_is_keventd()) + c_idle.work.func(&c_idle.work); + else { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } + + if (IS_ERR(c_idle.idle)) { + printk(KERN_ERR "failed fork for CPU %d\n", cpu); + return PTR_ERR(c_idle.idle); + } + set_idle_for_cpu(cpu, c_idle.idle); +do_rest: + per_cpu(current_task, cpu) = c_idle.idle; init_gdt(cpu); - per_cpu(current_task, cpu) = idle; early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); - idle->thread.ip = (unsigned long) start_secondary; + c_idle.idle->thread.ip = (unsigned long) start_secondary; /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); @@ -577,7 +609,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* So we see what's up */ printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); /* Stack for startup_32 can be just as for start_secondary onwards */ - stack_start.sp = (void *) idle->thread.sp; + stack_start.sp = (void *) c_idle.idle->thread.sp; irq_ctx_init(cpu); -- cgit v1.2.1 From 4c07ad6950c2c7077c6d60a3ce83fdbbb553bd65 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:46 -0300 Subject: x86: don't span a new worker in __smp_prepare_cpu We can do it now that do_boot_cpu has its own worker. 
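The worker that do_boot_cpu() now owns follows the queue-and-wait shape sketched below in plain C with pthreads; keventd_up(), fork_idle() and the completion are mimicked by stand-ins (worker_up, a fake pid, pthread_join), so treat this as a hedged model rather than kernel code:

#include <pthread.h>
#include <stdio.h>

struct create_idle {
        void (*func)(struct create_idle *);
        int cpu;
        long idle;              /* result slot; the kernel stores a task pointer */
};

static int worker_up = 1;       /* plays the role of keventd_up() */

static void do_fork_idle(struct create_idle *c_idle)
{
        c_idle->idle = 100 + c_idle->cpu;       /* pretend fork_idle() result */
}

static void *worker_thread(void *arg)
{
        struct create_idle *c = arg;
        c->func(c);             /* thread exit stands in for complete() */
        return NULL;
}

int main(void)
{
        struct create_idle c_idle = { .func = do_fork_idle, .cpu = 1 };

        if (!worker_up) {
                c_idle.func(&c_idle);   /* run inline, like !keventd_up() */
        } else {
                pthread_t t;
                pthread_create(&t, NULL, worker_thread, &c_idle);
                pthread_join(&t, NULL); /* wait_for_completion() analogue */
        }
        printf("idle task for CPU %d: %ld\n", c_idle.cpu, c_idle.idle);
        return 0;
}

Running the request inline when no worker is available mirrors the "!keventd_up() || current_is_keventd()" escape hatch kept inside do_boot_cpu().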
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 30 ++---------------------------- 1 file changed, 2 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index fc1eb5255f66..c03596e11db8 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -699,44 +699,18 @@ void cpu_exit_clear(void) } #endif -struct warm_boot_cpu_info { - struct completion *complete; - struct work_struct task; - int apicid; - int cpu; -}; - -static void __cpuinit do_warm_boot_cpu(struct work_struct *work) -{ - struct warm_boot_cpu_info *info = - container_of(work, struct warm_boot_cpu_info, task); - do_boot_cpu(info->apicid, info->cpu); - complete(info->complete); -} - static void __cpuinit __smp_prepare_cpu(int cpu) { - DECLARE_COMPLETION_ONSTACK(done); - struct warm_boot_cpu_info info; int apicid; apicid = per_cpu(x86_cpu_to_apicid, cpu); - info.complete = &done; - info.apicid = apicid; - info.cpu = cpu; - INIT_WORK(&info.task, do_warm_boot_cpu); - /* init low mem mapping */ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); flush_tlb_all(); - if (!keventd_up() || current_is_keventd()) - info.task.func(&info.task); - else { - schedule_work(&info.task); - wait_for_completion(&done); - } + + do_boot_cpu(apicid, cpu); } static int boot_cpu_logical_apicid; -- cgit v1.2.1 From ea0cadbfed09674bcc2b3e1e7f2d7317ddde4e95 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:47 -0300 Subject: x86: modify smp_callin in x86_64 to look like i386 We introduce empty macros just to make them look the same Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index b9384b3af017..e93fff42ec32 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -90,6 +90,9 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; static atomic_t init_deasserted __cpuinitdata; +#define smp_callin_clear_local_apic() do {} while (0) +#define map_cpu_to_logical_apicid() do {} while (0) + /* * Report back to the Boot Processor. * Running on AP. @@ -152,8 +155,10 @@ void __cpuinit smp_callin(void) */ Dprintk("CALLIN, before setup_local_APIC().\n"); + smp_callin_clear_local_apic(); setup_local_APIC(); end_local_APIC_setup(); + map_cpu_to_logical_apicid(); /* * Get our bogomips.
-- cgit v1.2.1 From df7939ae8bee101d9d79d104e17f14b60845cf0f Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:48 -0300 Subject: x86: wrap esr setting up in i386 in lapic_setup_esr it is a little bit more complicated than x86_64 due to erratas and other stuff, but its existance will ease integration Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 73 ++++++++++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index c32cc0feb47b..80c81c76625a 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -897,12 +897,50 @@ void __init init_bsp_APIC(void) apic_write_around(APIC_LVT1, value); } +void __cpuinit lapic_setup_esr(void) +{ + unsigned long oldvalue, value, maxlvt; + if (lapic_is_integrated() && !esr_disable) { + /* !82489DX */ + maxlvt = lapic_get_maxlvt(); + if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ + apic_write(APIC_ESR, 0); + oldvalue = apic_read(APIC_ESR); + + /* enables sending errors */ + value = ERROR_APIC_VECTOR; + apic_write_around(APIC_LVTERR, value); + /* + * spec says clear errors after enabling vector. + */ + if (maxlvt > 3) + apic_write(APIC_ESR, 0); + value = apic_read(APIC_ESR); + if (value != oldvalue) + apic_printk(APIC_VERBOSE, "ESR value before enabling " + "vector: 0x%08lx after: 0x%08lx\n", + oldvalue, value); + } else { + if (esr_disable) + /* + * Something untraceable is creating bad interrupts on + * secondary quads ... for the moment, just leave the + * ESR disabled - we can't do anything useful with the + * errors anyway - mbligh + */ + printk(KERN_INFO "Leaving ESR disabled.\n"); + else + printk(KERN_INFO "No ESR for 82489DX.\n"); + } +} + + /** * setup_local_APIC - setup the local APIC */ void __cpuinit setup_local_APIC(void) { - unsigned long oldvalue, value, maxlvt, integrated; + unsigned long value, integrated; int i, j; /* Pound the ESR really hard over the head with a big hammer - mbligh */ @@ -1027,38 +1065,7 @@ void __cpuinit setup_local_APIC(void) value |= APIC_LVT_LEVEL_TRIGGER; apic_write_around(APIC_LVT1, value); - if (integrated && !esr_disable) { - /* !82489DX */ - maxlvt = lapic_get_maxlvt(); - if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ - apic_write(APIC_ESR, 0); - oldvalue = apic_read(APIC_ESR); - - /* enables sending errors */ - value = ERROR_APIC_VECTOR; - apic_write_around(APIC_LVTERR, value); - /* - * spec says clear errors after enabling vector. - */ - if (maxlvt > 3) - apic_write(APIC_ESR, 0); - value = apic_read(APIC_ESR); - if (value != oldvalue) - apic_printk(APIC_VERBOSE, "ESR value before enabling " - "vector: 0x%08lx after: 0x%08lx\n", - oldvalue, value); - } else { - if (esr_disable) - /* - * Something untraceable is creating bad interrupts on - * secondary quads ... for the moment, just leave the - * ESR disabled - we can't do anything useful with the - * errors anyway - mbligh - */ - printk(KERN_INFO "Leaving ESR disabled.\n"); - else - printk(KERN_INFO "No ESR for 82489DX.\n"); - } + lapic_setup_esr(); /* Disable the local apic timer */ value = apic_read(APIC_LVTT); -- cgit v1.2.1 From ac60aae561fff99d38beba82d84277b12437c05e Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:49 -0300 Subject: x86: provide an end_local_APIC_setup function It splits setup_local_APIC in two, providing a function corresponding to the ending part of it. 
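In outline, the split looks like the compilable toy model below; the printf calls merely stand in for the LVT/ESR register programming, and none of this is the real APIC code:

#include <stdio.h>

static void lapic_setup_esr(void)
{
        printf("ESR vector enabled\n");
}

/* Early part: runs before the CPU announces itself. */
static void setup_local_APIC(void)
{
        printf("LVT0/LVT1 programmed\n");
}

/* Closing part, split out so both 32-bit and 64-bit can call it. */
static void end_local_APIC_setup(void)
{
        lapic_setup_esr();              /* moved out of setup_local_APIC() */
        printf("LAPIC timer masked\n");
}

int main(void)
{
        setup_local_APIC();
        end_local_APIC_setup();         /* callers now pair the two steps */
        return 0;
}

Both smpboot paths then invoke the pair in the same order.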
As a side effect, smp_callin looks the same between i386 and x86_64. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 7 ++++++- arch/x86/kernel/smpboot_32.c | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 80c81c76625a..6f506020bd7d 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1064,9 +1064,13 @@ void __cpuinit setup_local_APIC(void) if (!integrated) /* 82489DX */ value |= APIC_LVT_LEVEL_TRIGGER; apic_write_around(APIC_LVT1, value); +} - lapic_setup_esr(); +void __cpuinit end_local_APIC_setup(void) +{ + unsigned long value; + lapic_setup_esr(); /* Disable the local apic timer */ value = apic_read(APIC_LVTT); value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); @@ -1256,6 +1260,7 @@ int __init APIC_init_uniprocessor(void) setup_local_APIC(); + end_local_APIC_setup(); #ifdef CONFIG_X86_IO_APIC if (smp_found_config) if (!skip_ioapic_setup && nr_ioapics) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index c03596e11db8..dbfaeb30a69a 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -161,6 +161,7 @@ static void __cpuinit smp_callin(void) Dprintk("CALLIN, before setup_local_APIC().\n"); smp_callin_clear_local_apic(); setup_local_APIC(); + end_local_APIC_setup(); map_cpu_to_logical_apicid(); /* @@ -780,6 +781,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n"); connect_bsp_APIC(); setup_local_APIC(); + end_local_APIC_setup(); } return -1; } @@ -813,6 +815,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) connect_bsp_APIC(); setup_local_APIC(); + end_local_APIC_setup(); map_cpu_to_logical_apicid(); -- cgit v1.2.1 From e481fcf8563d300e7f8875cae5fdc41941d29de0 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:50 -0300 Subject: x86: calibrate delay with irqs enabled We do it to make it close to x86_64. The later needs it, otherwise the nmi watchdog can get into the scene and kill us with a hammer. Enabling irqs here used to trigger a bug in i386. This is because time irq handling relies upon structures that are only initialized after smp initcalls (More precisely, it will find per_cpu(hrtimer_bases, cpu)->cb_pending list not initialized and crash) Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index dbfaeb30a69a..bd2f8863efa2 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -167,7 +167,9 @@ static void __cpuinit smp_callin(void) /* * Get our bogomips. 
*/ + local_irq_enable(); calibrate_delay(); + local_irq_disable(); Dprintk("Stack at about %p\n",&cpuid); /* -- cgit v1.2.1 From 6becedbb06072c5741d4057b9facecb4b3143711 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:51 -0300 Subject: x86: minor adjustments for do_boot_cpu This patch provides minor adjustments for do_boot_cpus in both architectures to allow for integration Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 22 ++++++++++++++-------- arch/x86/kernel/smpboot_64.c | 15 ++++++--------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index bd2f8863efa2..5165b11d8aac 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -556,7 +556,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. */ { - unsigned long boot_error; + unsigned long boot_error = 0; int timeout; unsigned long start_eip; unsigned short nmi_high = 0, nmi_low = 0; @@ -566,11 +566,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) }; INIT_WORK(&c_idle.work, do_fork_idle); - /* - * Save current MTRR state in case it was changed since early boot - * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: - */ - mtrr_save_state(); + alternatives_smp_switch(1); c_idle.idle = get_idle_for_cpu(cpu); @@ -607,8 +603,6 @@ do_rest: /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); - alternatives_smp_switch(1); - /* So we see what's up */ printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); /* Stack for startup_32 can be just as for start_secondary onwards */ @@ -628,6 +622,12 @@ do_rest: store_NMI_vector(&nmi_high, &nmi_low); smpboot_setup_warm_reset_vector(start_eip); + /* + * Be paranoid about clearing APIC errors. + */ + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + /* * Starting actual IPI sequence... @@ -864,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) return -EINVAL; } + /* + * Save current MTRR state in case it was changed since early boot + * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: + */ + mtrr_save_state(); + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __smp_prepare_cpu(cpu); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index e93fff42ec32..7d1b4cb380db 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -432,7 +432,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work) */ static int __cpuinit do_boot_cpu(int cpu, int apicid) { - unsigned long boot_error; + unsigned long boot_error = 0; int timeout; unsigned long start_rip; struct create_idle c_idle = { @@ -530,11 +530,6 @@ do_rest: apic_write(APIC_ESR, 0); apic_read(APIC_ESR); - /* - * Status is now clean - */ - boot_error = 0; - /* * Starting actual IPI sequence... */ @@ -564,7 +559,7 @@ do_rest: print_cpu_info(&cpu_data(cpu)); } else { boot_error = 1; - if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) + if (*((volatile unsigned char *)trampoline_base) == 0xA5) /* trampoline started but...? 
*/ printk("Stuck ??\n"); @@ -583,10 +578,12 @@ do_rest: cpu_clear(cpu, cpu_present_map); cpu_clear(cpu, cpu_possible_map); per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; - return -EIO; } - return 0; + /* mark "stuck" area as not stuck */ + *((volatile unsigned long *)trampoline_base) = 0; + + return boot_error; } cycles_t cacheflush_time; -- cgit v1.2.1 From 4370ee4d3b7772158174bf6f0bf08359c2ccf54b Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:52 -0300 Subject: x86: call do_boot_cpu directly from native_cpu_up We don't need __smp_prepare_cpu anymore. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5165b11d8aac..4ba5ab2d81fb 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -702,20 +702,6 @@ void cpu_exit_clear(void) } #endif -static void __cpuinit __smp_prepare_cpu(int cpu) -{ - int apicid; - - apicid = per_cpu(x86_cpu_to_apicid, cpu); - - /* init low mem mapping */ - clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, - min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); - flush_tlb_all(); - - do_boot_cpu(apicid, cpu); -} - static int boot_cpu_logical_apicid; /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; @@ -872,7 +858,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - __smp_prepare_cpu(cpu); + /* init low mem mapping */ + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, + min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); + flush_tlb_all(); + + do_boot_cpu(apicid, cpu); /* In case one didn't come up */ if (!cpu_isset(cpu, cpu_callin_map)) { -- cgit v1.2.1 From f6bc40290964b5fcb48c226ccafa4b7536d62663 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:53 -0300 Subject: x86: include mach_apic.h in smpboot_64.c and smpboot.c After the inclusion, a lot of files needs fixing for conflicts, some of them in the headers themselves, to accomodate for both i386 and x86_64 versions. 
[ mingo@elte.hu: build fix ] Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 4 ++++ arch/x86/kernel/mpparse_64.c | 2 ++ arch/x86/kernel/smpboot.c | 2 ++ arch/x86/kernel/smpboot_64.c | 1 + arch/x86/vdso/Makefile | 2 +- include/asm-x86/apic.h | 1 - include/asm-x86/apicdef.h | 6 ------ include/asm-x86/mach-default/mach_apic.h | 11 +++++++++++ include/asm-x86/mach-default/mach_apicdef.h | 5 +++++ include/asm-x86/smp_64.h | 9 +-------- 10 files changed, 27 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2cdc9de9371d..956b60f3ebd5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -40,6 +40,10 @@ #include #include +#ifdef CONFIG_X86_LOCAL_APIC +# include +#endif + static int __initdata acpi_force = 0; #ifdef CONFIG_ACPI diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 529b1c22077e..03ef1a8b53e8 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -30,6 +30,8 @@ #include #include +#include + /* Have we found an MP table */ int smp_found_config; unsigned int __cpuinitdata maxcpus = NR_CPUS; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6978f1bf6533..253be86a88e4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -11,6 +11,8 @@ #include #include +#include + /* Number of siblings per CPU package */ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 7d1b4cb380db..8a59fa80f883 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -61,6 +61,7 @@ #include #include +#include #include /* Set when the idlers are all forked */ diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 0a8f4742ef51..17a6b057856b 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -39,7 +39,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 -$(vobjs): KBUILD_CFLAGS = $(CFL) +$(vobjs): KBUILD_CFLAGS += $(CFL) targets += vdso-syms.lds obj-$(VDSO64-y) += vdso-syms.lds diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index db5f7501aed6..424e1dfe13c9 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -128,7 +128,6 @@ extern void enable_NMI_through_LVT0(void); * On 32bit this is mach-xxx local */ #ifdef CONFIG_X86_64 -extern void setup_apic_routing(void); extern void early_init_lapic_mapping(void); #endif diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index b0c6b285c916..674a2280e21e 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -12,12 +12,6 @@ #define APIC_ID 0x20 -#ifdef CONFIG_X86_64 -# define APIC_ID_MASK (0xFFu<<24) -# define GET_APIC_ID(x) (((x)>>24)&0xFFu) -# define SET_APIC_ID(x) (((x)<<24)) -#endif - #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFFu) diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e3c2c1012c1c..e081bdccde2b 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -54,21 +54,27 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) return phys_map; } +#ifdef CONFIG_X86_64 +extern void setup_apic_routing(void); +#else static inline void setup_apic_routing(void) { printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", "Flat", nr_ioapics); } +#endif static inline int multi_timer_check(int apic, int irq) { return 0; } +#ifdef CONFIG_X86_32 static inline int apicid_to_node(int logical_apicid) { return 0; } +#endif /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) @@ -78,8 +84,13 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { +#ifdef CONFIG_X86_64 + if (cpu_present(mps_cpu)) + return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); +#else if (mps_cpu < get_physical_broadcast()) return mps_cpu; +#endif else return BAD_APICID; } diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index ae9841319094..7b78275e6d33 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h @@ -3,7 +3,12 @@ #include +#ifdef CONFIG_X86_64 +#define APIC_ID_MASK (0xFFu<<24) +#define SET_APIC_ID(x) (((x)<<24)) +#else #define APIC_ID_MASK (0xF<<24) +#endif static inline unsigned get_apic_id(unsigned long x) { diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 1b3c0f1de9a9..be870a4881fc 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -19,14 +19,6 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (cpu_present(mps_cpu)) - return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); - else - return BAD_APICID; -} - #ifdef CONFIG_SMP #define raw_smp_processor_id() read_pda(cpunumber) @@ -64,6 +56,7 @@ static __inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +#include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ -- cgit v1.2.1 From 071782692798d7a6e0a5679f3186ea7fea49fd62 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:54 -0300 Subject: x86: change wakeup_secondary name wakeup_secondary_via_INIT => wakeup_secondary_cpu. This is to match i386, where init is not always used. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_64.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 8a59fa80f883..7ec96218a97e 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -63,6 +63,7 @@ #include #include #include +#include /* Set when the idlers are all forked */ int smp_threads_ready; @@ -293,7 +294,8 @@ static void __inquire_remote_apic(int apicid) /* * Kick the secondary to wake up. */ -static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip) +static int __cpuinit wakeup_secondary_cpu(int phys_apicid, + unsigned int start_rip) { unsigned long send_status, accept_status = 0; int maxlvt, num_starts, j; @@ -534,7 +536,7 @@ do_rest: /* * Starting actual IPI sequence... 
*/ - boot_error = wakeup_secondary_via_INIT(apicid, start_rip); + boot_error = wakeup_secondary_cpu(apicid, start_rip); if (!boot_error) { /* -- cgit v1.2.1 From b9f9294a86fd274e4055891450033e8bc9d68f66 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:55 -0300 Subject: x86: add callin tests to cpu_up Now that we boot cpus here, callin_map has this meaning (same as x86_64) Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 4ba5ab2d81fb..33758a2ddd48 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -839,6 +839,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) { int apicid = cpu_present_to_apicid(cpu); unsigned long flags; + int err; WARN_ON(irqs_disabled()); @@ -850,6 +851,14 @@ int __cpuinit native_cpu_up(unsigned int cpu) return -EINVAL; } + /* + * Already booted CPU? + */ + if (cpu_isset(cpu, cpu_callin_map)) { + Dprintk("do_boot_cpu %d Already started\n", cpu); + return -ENOSYS; + } + /* * Save current MTRR state in case it was changed since early boot * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: @@ -863,15 +872,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); flush_tlb_all(); - do_boot_cpu(apicid, cpu); - - /* In case one didn't come up */ - if (!cpu_isset(cpu, cpu_callin_map)) { - printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); - return -EIO; + err = do_boot_cpu(apicid, cpu); + if (err < 0) { + Dprintk("do_boot_cpu failed %d\n", err); + return err; } - /* * Check TSC synchronization with the AP (keep irqs disabled * while doing so): -- cgit v1.2.1 From 7cc3959ecd830796231f50bf5e42dc018b3694f2 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:56 -0300 Subject: x86: move {un}map_cpu_to_logical_apicid to smpboot.c Move map_cpu_to_logical_apicid() and unmap_cpu_to_logical_apicid() to smpboot.c. They take together all the bunch of static functions they rely upon Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 60 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 59 ++----------------------------------------- 2 files changed, 62 insertions(+), 57 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 253be86a88e4..5bff87e99898 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -50,6 +50,66 @@ static cpumask_t cpu_sibling_setup_map; /* Set if we find a B stepping CPU */ int __cpuinitdata smp_b_stepping; +#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) + +/* which logical CPUs are on which nodes */ +cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = + { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; +EXPORT_SYMBOL(node_to_cpumask_map); +/* which node each logical CPU is on */ +int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; +EXPORT_SYMBOL(cpu_to_node_map); + +/* set up a mapping between cpu and node. */ +static void map_cpu_to_node(int cpu, int node) +{ + printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); + cpu_set(cpu, node_to_cpumask_map[node]); + cpu_to_node_map[cpu] = node; +} + +/* undo a mapping between cpu and node. 
*/ +static void unmap_cpu_to_node(int cpu) +{ + int node; + + printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); + for (node = 0; node < MAX_NUMNODES; node++) + cpu_clear(cpu, node_to_cpumask_map[node]); + cpu_to_node_map[cpu] = 0; +} +#else /* !(CONFIG_NUMA && CONFIG_X86_32) */ +#define map_cpu_to_node(cpu, node) ({}) +#define unmap_cpu_to_node(cpu) ({}) +#endif + +#ifdef CONFIG_X86_32 +u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = + { [0 ... NR_CPUS-1] = BAD_APICID }; + +void map_cpu_to_logical_apicid(void) +{ + int cpu = smp_processor_id(); + int apicid = logical_smp_processor_id(); + int node = apicid_to_node(apicid); + + if (!node_online(node)) + node = first_online_node; + + cpu_2_logical_apicid[cpu] = apicid; + map_cpu_to_node(cpu, node); +} + +void unmap_cpu_to_logical_apicid(int cpu) +{ + cpu_2_logical_apicid[cpu] = BAD_APICID; + unmap_cpu_to_node(cpu); +} +#else +#define unmap_cpu_to_logical_apicid(cpu) do {} while (0) +#define map_cpu_to_logical_apicid() do {} while (0) +#endif + static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 33758a2ddd48..1eb7b73b45a3 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -74,7 +74,8 @@ EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); u8 apicid_2_node[MAX_APICID]; -static void map_cpu_to_logical_apicid(void); +extern void map_cpu_to_logical_apicid(void); +extern void unmap_cpu_to_logical_apicid(int cpu); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; @@ -262,62 +263,6 @@ extern struct { unsigned short ss; } stack_start; -#ifdef CONFIG_NUMA - -/* which logical CPUs are on which nodes */ -cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = - { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; -EXPORT_SYMBOL(node_to_cpumask_map); -/* which node each logical CPU is on */ -int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; -EXPORT_SYMBOL(cpu_to_node_map); - -/* set up a mapping between cpu and node. */ -static inline void map_cpu_to_node(int cpu, int node) -{ - printk("Mapping cpu %d to node %d\n", cpu, node); - cpu_set(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = node; -} - -/* undo a mapping between cpu and node. */ -static inline void unmap_cpu_to_node(int cpu) -{ - int node; - - printk("Unmapping cpu %d from all nodes\n", cpu); - for (node = 0; node < MAX_NUMNODES; node ++) - cpu_clear(cpu, node_to_cpumask_map[node]); - cpu_to_node_map[cpu] = 0; -} -#else /* !CONFIG_NUMA */ - -#define map_cpu_to_node(cpu, node) ({}) -#define unmap_cpu_to_node(cpu) ({}) - -#endif /* CONFIG_NUMA */ - -u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... 
NR_CPUS-1] = BAD_APICID }; - -static void map_cpu_to_logical_apicid(void) -{ - int cpu = smp_processor_id(); - int apicid = logical_smp_processor_id(); - int node = apicid_to_node(apicid); - - if (!node_online(node)) - node = first_online_node; - - cpu_2_logical_apicid[cpu] = apicid; - map_cpu_to_node(cpu, node); -} - -static void unmap_cpu_to_logical_apicid(int cpu) -{ - cpu_2_logical_apicid[cpu] = BAD_APICID; - unmap_cpu_to_node(cpu); -} - static inline void __inquire_remote_apic(int apicid) { unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; -- cgit v1.2.1 From 9d97d0da71ad6c7ceb76b4e29b02bed1ee9d4cd2 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:57 -0300 Subject: x86: move stack_start to smp.h voyager would conflict with it, but the types are ultimately compatible. So remove the extern definition from voyager_smp.c in favour of the common one Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 6 ------ arch/x86/mach-voyager/voyager_smp.c | 7 ------- include/asm-x86/smp.h | 7 +++++++ 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 1eb7b73b45a3..ae25927f08c1 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -257,12 +257,6 @@ void __devinit initialize_secondary(void) :"m" (current->thread.sp),"m" (current->thread.ip)); } -/* Static state in head.S used to set up a CPU */ -extern struct { - void * sp; - unsigned short ss; -} stack_start; - static inline void __inquire_remote_apic(int apicid) { unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 11b806c7ed94..4397235c2e30 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -520,13 +520,6 @@ static void __init do_boot_cpu(__u8 cpu) & ~(voyager_extended_vic_processors & voyager_allowed_boot_processors); - /* This is an area in head.S which was used to set up the - * initial kernel stack. We need to alter this to give the - * booting CPU a new stack (taken from its idle process) */ - extern struct { - __u8 *sp; - unsigned short ss; - } stack_start; /* This is the format of the CPI IDT gate (in real mode) which * we're hijacking to boot the CPU */ union IDTFormat { diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d02e6eacee39..78ef16dd194b 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -28,6 +28,13 @@ extern const unsigned char trampoline_data []; extern const unsigned char trampoline_end []; extern unsigned char *trampoline_base; +/* Static state in head.S used to set up a CPU */ +extern struct { + void *sp; + unsigned short ss; +} stack_start; + + struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); -- cgit v1.2.1 From c70dcb74309cedfa64f0060f4a84792e873ceb53 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:58 -0300 Subject: x86: change boot_cpu_id to boot_cpu_physical_apicid This is to match i386. The former name was cuter, but the current is more meaningful and more general, since cpu_id can be a logical id. 
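What the renamed variable holds can be illustrated with a small sketch; the MMIO read is faked, and the shift-by-24 layout matches the 64-bit GET_APIC_ID used elsewhere in this series:

#include <stdio.h>

#define APIC_ID_SHIFT 24
#define GET_APIC_ID(x) (((x) >> APIC_ID_SHIFT) & 0xFFu)

/* Pretend MMIO read of the local APIC ID register. */
static unsigned apic_read_id(void)
{
        return 2u << APIC_ID_SHIFT;
}

int main(void)
{
        /* A physical APIC ID, not a logical CPU number: logical CPU 0
         * can perfectly well sit on APIC ID 2. */
        unsigned boot_cpu_physical_apicid = GET_APIC_ID(apic_read_id());

        printf("BSP physical APIC ID: %u\n", boot_cpu_physical_apicid);
        return 0;
}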
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 13 +++++++------ arch/x86/kernel/mpparse_64.c | 12 ++++++------ arch/x86/kernel/smpboot_64.c | 18 ++++++++++-------- include/asm-x86/apic.h | 1 - include/asm-x86/smp.h | 3 +++ include/asm-x86/smp_32.h | 5 ----- include/asm-x86/smp_64.h | 4 ---- 7 files changed, 26 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 8a475793f736..868ec1deb19a 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -431,7 +431,8 @@ void __cpuinit check_boot_apic_timer_broadcast(void) lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; local_irq_enable(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, + &boot_cpu_physical_apicid); local_irq_disable(); } @@ -857,7 +858,7 @@ static int __init detect_init_APIC(void) } mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - boot_cpu_id = 0; + boot_cpu_physical_apicid = 0; return 0; } @@ -882,7 +883,7 @@ void __init early_init_lapic_mapping(void) * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). */ - boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); } /** @@ -909,7 +910,7 @@ void __init init_apic_mappings(void) * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). */ - boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); } /* @@ -930,8 +931,8 @@ int __init APIC_init_uniprocessor(void) verify_local_APIC(); - phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); - apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id)); + phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); + apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); setup_local_APIC(); diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 03ef1a8b53e8..20a345dd425b 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -59,8 +59,8 @@ unsigned long mp_lapic_addr = 0; /* Processor that is doing the boot up */ -unsigned int boot_cpu_id = -1U; -EXPORT_SYMBOL(boot_cpu_id); +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL(boot_cpu_physical_apicid); /* Internal processor count */ unsigned int num_processors; @@ -107,7 +107,7 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) } if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { bootup_cpu = " (Bootup-CPU)"; - boot_cpu_id = m->mpc_apicid; + boot_cpu_physical_apicid = m->mpc_apicid; } printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); @@ -665,8 +665,8 @@ void __init mp_register_lapic_address(u64 address) { mp_lapic_addr = (unsigned long) address; set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); - if (boot_cpu_id == -1U) - boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); } void __cpuinit mp_register_lapic (u8 id, u8 enabled) @@ -674,7 +674,7 @@ void __cpuinit mp_register_lapic (u8 id, u8 enabled) struct mpc_config_processor processor; int boot_cpu = 0; - if (id == boot_cpu_id) + if (id == boot_cpu_physical_apicid) boot_cpu = 1; processor.mpc_type = MP_PROCESSOR; diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 7ec96218a97e..420ae4a33548 100644 --- a/arch/x86/kernel/smpboot_64.c +++ 
b/arch/x86/kernel/smpboot_64.c @@ -602,7 +602,8 @@ static __init void disable_smp(void) cpu_present_map = cpumask_of_cpu(0); cpu_possible_map = cpumask_of_cpu(0); if (smp_found_config) - phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); + phys_cpu_present_map = + physid_mask_of_physid(boot_cpu_physical_apicid); else phys_cpu_present_map = physid_mask_of_physid(0); cpu_set(0, per_cpu(cpu_sibling_map, 0)); @@ -637,9 +638,10 @@ static int __init smp_sanity_check(unsigned max_cpus) * Should not be necessary because the MP table should list the boot * CPU too, but we do it for the sake of robustness anyway. */ - if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) { - printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n", - boot_cpu_id); + if (!physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map)) { + printk(KERN_NOTICE + "weird, boot CPU (#%d) not listed by the BIOS.\n", + boot_cpu_physical_apicid); physid_set(hard_smp_processor_id(), phys_cpu_present_map); } @@ -648,7 +650,7 @@ static int __init smp_sanity_check(unsigned max_cpus) */ if (!cpu_has_apic) { printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", - boot_cpu_id); + boot_cpu_physical_apicid); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); nr_ioapics = 0; return -1; @@ -709,9 +711,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) enable_IO_APIC(); end_local_APIC_setup(); - if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) { + if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) { panic("Boot APIC ID in local APIC unexpected (%d vs %d)", - GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id); + GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid); /* Or can we switch back to PIC here? */ } @@ -756,7 +758,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); - if (apicid == BAD_APICID || apicid == boot_cpu_id || + if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || !physid_isset(apicid, phys_cpu_present_map)) { printk("__cpu_up: bad cpu %d\n", cpu); return -EINVAL; diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index 424e1dfe13c9..b804238489a1 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -44,7 +44,6 @@ extern int apic_runs_main_timer; extern int ioapic_force; extern int disable_apic; extern int disable_apic_timer; -extern unsigned boot_cpu_id; /* * Basic functions accessing APICs. 
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 78ef16dd194b..2ad2f4ffe498 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -109,6 +109,9 @@ extern void prefill_possible_map(void); extern unsigned long setup_trampoline(void); void smp_store_cpu_info(int id); +#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +#else +#define cpu_physical_id(cpu) boot_cpu_physical_apicid #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 478f5564630f..f861d0415171 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -30,8 +30,6 @@ extern void zap_low_mappings (void); DECLARE_PER_CPU(int, cpu_number); #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) - extern int safe_smp_processor_id(void); /* We don't mark CPUs online until __cpu_up(), so we need another measure */ @@ -41,10 +39,7 @@ static inline int num_booting_cpus(void) } #else /* CONFIG_SMP */ - #define safe_smp_processor_id() 0 -#define cpu_physical_id(cpu) boot_cpu_physical_apicid - #endif /* !CONFIG_SMP */ #ifdef CONFIG_X86_LOCAL_APIC diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index be870a4881fc..fd709cbba4d1 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -22,7 +22,6 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), #ifdef CONFIG_SMP #define raw_smp_processor_id() read_pda(cpunumber) -#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define stack_smp_processor_id() \ ({ \ @@ -41,9 +40,6 @@ static inline int num_booting_cpus(void) } #else /* CONFIG_SMP */ - -extern unsigned int boot_cpu_id; -#define cpu_physical_id(cpu) boot_cpu_id #define stack_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -- cgit v1.2.1 From cb3c8b9003f15efa4a750a32d2d602d40cc45d5a Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:25:59 -0300 Subject: x86: integrate do_boot_cpu This is a very large patch, because it depends on a lot of auxiliary static functions. But they all have been modified to the point that they're sufficiently close now. So they're just merged in smpboot.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 588 +++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 532 +-------------------------------------- arch/x86/kernel/smpboot_64.c | 515 +------------------------------------ include/asm-x86/smp.h | 3 + 4 files changed, 594 insertions(+), 1044 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5bff87e99898..69c17965f48d 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -4,14 +4,42 @@ #include #include #include +#include +#include +#include #include #include #include #include #include +#include +#include +#include +#include +#include #include +#include +#include + +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. 
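+ *
+ * (In other words: idle task_structs are cached so that a CPU taken
+ * offline and brought back later reuses its old idle thread in
+ * do_boot_cpu() instead of forking a new one.)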
+ */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -41,6 +69,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); +static atomic_t init_deasserted; + /* ready for x86_64, no harm for x86, since it will overwrite after alloc */ unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE); @@ -110,6 +140,96 @@ void unmap_cpu_to_logical_apicid(int cpu) #define map_cpu_to_logical_apicid() do {} while (0) #endif +/* + * Report back to the Boot Processor. + * Running on AP. + */ +void __cpuinit smp_callin(void) +{ + int cpuid, phys_id; + unsigned long timeout; + + /* + * If waken up by an INIT in an 82489DX configuration + * we may get here before an INIT-deassert IPI reaches + * our local APIC. We have to wait for the IPI or we'll + * lock up on an APIC access. + */ + wait_for_init_deassert(&init_deasserted); + + /* + * (This works even if the APIC is not enabled.) + */ + phys_id = GET_APIC_ID(apic_read(APIC_ID)); + cpuid = smp_processor_id(); + if (cpu_isset(cpuid, cpu_callin_map)) { + panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, + phys_id, cpuid); + } + Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); + + /* + * STARTUP IPIs are fragile beasts as they might sometimes + * trigger some glue motherboard logic. Complete APIC bus + * silence for 1 second, this overestimates the time the + * boot CPU is spending to send the up to 2 STARTUP IPIs + * by a factor of two. This should be enough. + */ + + /* + * Waiting 2s total for startup (udelay is not yet working) + */ + timeout = jiffies + 2*HZ; + while (time_before(jiffies, timeout)) { + /* + * Has the boot CPU finished it's STARTUP sequence? + */ + if (cpu_isset(cpuid, cpu_callout_map)) + break; + cpu_relax(); + } + + if (!time_before(jiffies, timeout)) { + panic("%s: CPU%d started up but did not get a callout!\n", + __func__, cpuid); + } + + /* + * the boot CPU has finished the init stage and is spinning + * on callin_map until we finish. We are free to set up this + * CPU, first the APIC. (this is probably redundant on most + * boards) + */ + + Dprintk("CALLIN, before setup_local_APIC().\n"); + smp_callin_clear_local_apic(); + setup_local_APIC(); + end_local_APIC_setup(); + map_cpu_to_logical_apicid(); + + /* + * Get our bogomips. + * + * Need to enable IRQs because it can take longer and then + * the NMI watchdog might kill us. + */ + local_irq_enable(); + calibrate_delay(); + local_irq_disable(); + Dprintk("Stack at about %p\n", &cpuid); + + /* + * Save our processor parameters + */ + smp_store_cpu_info(cpuid); + + /* + * Allow the master to continue. 
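+ *
+ * (Setting our bit in cpu_callin_map is the AP's half of the boot
+ * handshake: do_boot_cpu() polls this map for up to 5 seconds after
+ * sending the startup IPIs.)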
+ */ + cpu_set(cpuid, cpu_callin_map); +} + + static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 @@ -327,6 +447,474 @@ void impress_friends(void) Dprintk("Before bogocount - setting activated=1.\n"); } +static inline void __inquire_remote_apic(int apicid) +{ + unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; + char *names[] = { "ID", "VERSION", "SPIV" }; + int timeout; + u32 status; + + printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); + + /* + * Wait for idle. + */ + status = safe_apic_wait_icr_idle(); + if (status) + printk(KERN_CONT + "a previous APIC delivery may have failed\n"); + + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); + apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); + + timeout = 0; + do { + udelay(100); + status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; + } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); + + switch (status) { + case APIC_ICR_RR_VALID: + status = apic_read(APIC_RRR); + printk(KERN_CONT "%08x\n", status); + break; + default: + printk(KERN_CONT "failed\n"); + } + } +} + +#ifdef WAKE_SECONDARY_VIA_NMI +/* + * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal + * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this + * won't ... remember to clear down the APIC, etc later. + */ +static int __devinit +wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) +{ + unsigned long send_status, accept_status = 0; + int maxlvt; + + /* Target chip */ + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); + + /* Boot on the stack */ + /* Kick the second */ + apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); + + Dprintk("Waiting for send to finish...\n"); + send_status = safe_apic_wait_icr_idle(); + + /* + * Give the other CPU some time to accept the IPI. + */ + udelay(200); + /* + * Due to the Pentium erratum 3AP. + */ + maxlvt = lapic_get_maxlvt(); + if (maxlvt > 3) { + apic_read_around(APIC_SPIV); + apic_write(APIC_ESR, 0); + } + accept_status = (apic_read(APIC_ESR) & 0xEF); + Dprintk("NMI sent.\n"); + + if (send_status) + printk(KERN_ERR "APIC never delivered???\n"); + if (accept_status) + printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); + + return (send_status | accept_status); +} +#endif /* WAKE_SECONDARY_VIA_NMI */ + +extern void start_secondary(void *unused); +#ifdef WAKE_SECONDARY_VIA_INIT +static int __devinit +wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) +{ + unsigned long send_status, accept_status = 0; + int maxlvt, num_starts, j; + + /* + * Be paranoid about clearing APIC errors. 
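+ *
+ * (The ESR latches errors until it is rewritten, so clearing it here
+ * lets the reads after each STARTUP IPI below reflect only this
+ * wakeup attempt.)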
+ */ + if (APIC_INTEGRATED(apic_version[phys_apicid])) { + apic_read_around(APIC_SPIV); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + } + + Dprintk("Asserting INIT.\n"); + + /* + * Turn INIT on target chip + */ + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + + /* + * Send IPI + */ + apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT + | APIC_DM_INIT); + + Dprintk("Waiting for send to finish...\n"); + send_status = safe_apic_wait_icr_idle(); + + mdelay(10); + + Dprintk("Deasserting INIT.\n"); + + /* Target chip */ + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + + /* Send IPI */ + apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); + + Dprintk("Waiting for send to finish...\n"); + send_status = safe_apic_wait_icr_idle(); + + mb(); + atomic_set(&init_deasserted, 1); + + /* + * Should we send STARTUP IPIs ? + * + * Determine this based on the APIC version. + * If we don't have an integrated APIC, don't send the STARTUP IPIs. + */ + if (APIC_INTEGRATED(apic_version[phys_apicid])) + num_starts = 2; + else + num_starts = 0; + + /* + * Paravirt / VMI wants a startup IPI hook here to set up the + * target processor state. + */ + startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, +#ifdef CONFIG_X86_64 + (unsigned long)init_rsp); +#else + (unsigned long)stack_start.sp); +#endif + + /* + * Run STARTUP IPI loop. + */ + Dprintk("#startup loops: %d.\n", num_starts); + + maxlvt = lapic_get_maxlvt(); + + for (j = 1; j <= num_starts; j++) { + Dprintk("Sending STARTUP #%d.\n", j); + apic_read_around(APIC_SPIV); + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + Dprintk("After apic_write.\n"); + + /* + * STARTUP IPI + */ + + /* Target chip */ + apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); + + /* Boot on the stack */ + /* Kick the second */ + apic_write_around(APIC_ICR, APIC_DM_STARTUP + | (start_eip >> 12)); + + /* + * Give the other CPU some time to accept the IPI. + */ + udelay(300); + + Dprintk("Startup point 1.\n"); + + Dprintk("Waiting for send to finish...\n"); + send_status = safe_apic_wait_icr_idle(); + + /* + * Give the other CPU some time to accept the IPI. + */ + udelay(200); + /* + * Due to the Pentium erratum 3AP. + */ + if (maxlvt > 3) { + apic_read_around(APIC_SPIV); + apic_write(APIC_ESR, 0); + } + accept_status = (apic_read(APIC_ESR) & 0xEF); + if (send_status || accept_status) + break; + } + Dprintk("After Startup.\n"); + + if (send_status) + printk(KERN_ERR "APIC never delivered???\n"); + if (accept_status) + printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); + + return (send_status | accept_status); +} +#endif /* WAKE_SECONDARY_VIA_INIT */ + +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +static int __cpuinit do_boot_cpu(int apicid, int cpu) +/* + * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad + * (ie clustered apic addressing mode), this is a LOGICAL apic ID. + * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. 
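+ *
+ * Rough shape of the handshake implemented below:
+ *
+ *   BSP                                 AP
+ *   ---                                 --
+ *   set up trampoline + warm reset
+ *   INIT / INIT-deassert / STARTUP ---> starts in the trampoline,
+ *   set cpu_callout_map bit             spins in smp_callin() on it,
+ *   poll cpu_callin_map (<= 5s)    <--- sets its cpu_callin_map bit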
+ */ +{ + unsigned long boot_error = 0; + int timeout; + unsigned long start_ip; + unsigned short nmi_high = 0, nmi_low = 0; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK(&c_idle.work, do_fork_idle); +#ifdef CONFIG_X86_64 + /* allocate memory for gdts of secondary cpus. Hotplug is considered */ + if (!cpu_gdt_descr[cpu].address && + !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { + printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu); + return -1; + } + + /* Allocate node local memory for AP pdas */ + if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { + struct x8664_pda *newpda, *pda; + int node = cpu_to_node(cpu); + pda = cpu_pda(cpu); + newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC, + node); + if (newpda) { + memcpy(newpda, pda, sizeof(struct x8664_pda)); + cpu_pda(cpu) = newpda; + } else + printk(KERN_ERR + "Could not allocate node local PDA for CPU %d on node %d\n", + cpu, node); + } +#endif + + alternatives_smp_switch(1); + + c_idle.idle = get_idle_for_cpu(cpu); + + /* + * We can't use kernel_thread since we must avoid to + * reschedule the child. + */ + if (c_idle.idle) { + c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) + (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); + init_idle(c_idle.idle, cpu); + goto do_rest; + } + + if (!keventd_up() || current_is_keventd()) + c_idle.work.func(&c_idle.work); + else { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } + + if (IS_ERR(c_idle.idle)) { + printk("failed fork for CPU %d\n", cpu); + return PTR_ERR(c_idle.idle); + } + + set_idle_for_cpu(cpu, c_idle.idle); +do_rest: +#ifdef CONFIG_X86_32 + per_cpu(current_task, cpu) = c_idle.idle; + init_gdt(cpu); + early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + c_idle.idle->thread.ip = (unsigned long) start_secondary; + /* Stack for startup_32 can be just as for start_secondary onwards */ + stack_start.sp = (void *) c_idle.idle->thread.sp; + irq_ctx_init(cpu); +#else + cpu_pda(cpu)->pcurrent = c_idle.idle; + init_rsp = c_idle.idle->thread.sp; + load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread); + initial_code = (unsigned long)start_secondary; + clear_tsk_thread_flag(c_idle.idle, TIF_FORK); +#endif + + /* start_ip had better be page-aligned! */ + start_ip = setup_trampoline(); + + /* So we see what's up */ + printk(KERN_INFO "Booting processor %d/%d ip %lx\n", + cpu, apicid, start_ip); + + /* + * This grunge runs the startup process for + * the targeted processor. + */ + + atomic_set(&init_deasserted, 0); + + Dprintk("Setting warm reset code and vector.\n"); + + store_NMI_vector(&nmi_high, &nmi_low); + + smpboot_setup_warm_reset_vector(start_ip); + /* + * Be paranoid about clearing APIC errors. + */ + apic_write(APIC_ESR, 0); + apic_read(APIC_ESR); + + + /* + * Starting actual IPI sequence... + */ + boot_error = wakeup_secondary_cpu(apicid, start_ip); + + if (!boot_error) { + /* + * allow APs to start initializing. 
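+ *
+ * (cpu_callout_map is the mirror of cpu_callin_map: the AP spins in
+ * smp_callin() until its bit shows up here.)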
+ */ + Dprintk("Before Callout %d.\n", cpu); + cpu_set(cpu, cpu_callout_map); + Dprintk("After Callout %d.\n", cpu); + + /* + * Wait 5s total for a response + */ + for (timeout = 0; timeout < 50000; timeout++) { + if (cpu_isset(cpu, cpu_callin_map)) + break; /* It has booted */ + udelay(100); + } + + if (cpu_isset(cpu, cpu_callin_map)) { + /* number CPUs logically, starting from 1 (BSP is 0) */ + Dprintk("OK.\n"); + printk(KERN_INFO "CPU%d: ", cpu); + print_cpu_info(&cpu_data(cpu)); + Dprintk("CPU has booted.\n"); + } else { + boot_error = 1; + if (*((volatile unsigned char *)trampoline_base) + == 0xA5) + /* trampoline started but...? */ + printk(KERN_ERR "Stuck ??\n"); + else + /* trampoline code not run */ + printk(KERN_ERR "Not responding.\n"); + inquire_remote_apic(apicid); + } + } + + if (boot_error) { + /* Try to put things back the way they were before ... */ + unmap_cpu_to_logical_apicid(cpu); +#ifdef CONFIG_X86_64 + clear_node_cpumask(cpu); /* was set by numa_add_cpu */ +#endif + cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ + cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ + cpu_clear(cpu, cpu_possible_map); + cpu_clear(cpu, cpu_present_map); + per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; + } + + /* mark "stuck" area as not stuck */ + *((volatile unsigned long *)trampoline_base) = 0; + + return boot_error; +} + +int __cpuinit native_cpu_up(unsigned int cpu) +{ + int apicid = cpu_present_to_apicid(cpu); + unsigned long flags; + int err; + + WARN_ON(irqs_disabled()); + + Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); + + if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || + !physid_isset(apicid, phys_cpu_present_map)) { + printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); + return -EINVAL; + } + + /* + * Already booted CPU? + */ + if (cpu_isset(cpu, cpu_callin_map)) { + Dprintk("do_boot_cpu %d Already started\n", cpu); + return -ENOSYS; + } + + /* + * Save current MTRR state in case it was changed since early boot + * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: + */ + mtrr_save_state(); + + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + +#ifdef CONFIG_X86_32 + /* init low mem mapping */ + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, + min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); + flush_tlb_all(); +#endif + + err = do_boot_cpu(apicid, cpu); + if (err < 0) { + Dprintk("do_boot_cpu failed %d\n", err); + return err; + } + + /* + * Check TSC synchronization with the AP (keep irqs disabled + * while doing so): + */ + local_irq_save(flags); + check_tsc_sync_source(cpu); + local_irq_restore(flags); + + while (!cpu_isset(cpu, cpu_online_map)) { + cpu_relax(); + touch_nmi_watchdog(); + } + + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index ae25927f08c1..e82eeb2fdfef 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -80,114 +80,12 @@ extern void unmap_cpu_to_logical_apicid(int cpu); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -/* Store all idle threads, this can be reused instead of creating -* a new thread. Also avoids complicated thread destroy functionality -* for idle threads. -*/ -#ifdef CONFIG_HOTPLUG_CPU -/* - * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is - * removed after init for !CONFIG_HOTPLUG_CPU. 
- */ -static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); -#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) -#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) -#else -struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; -#define get_idle_for_cpu(x) (idle_thread_array[(x)]) -#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) -#endif - -static atomic_t init_deasserted; - -static void __cpuinit smp_callin(void) -{ - int cpuid, phys_id; - unsigned long timeout; - - /* - * If waken up by an INIT in an 82489DX configuration - * we may get here before an INIT-deassert IPI reaches - * our local APIC. We have to wait for the IPI or we'll - * lock up on an APIC access. - */ - wait_for_init_deassert(&init_deasserted); - - /* - * (This works even if the APIC is not enabled.) - */ - phys_id = GET_APIC_ID(apic_read(APIC_ID)); - cpuid = smp_processor_id(); - if (cpu_isset(cpuid, cpu_callin_map)) { - printk("huh, phys CPU#%d, CPU#%d already present??\n", - phys_id, cpuid); - BUG(); - } - Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); - - /* - * STARTUP IPIs are fragile beasts as they might sometimes - * trigger some glue motherboard logic. Complete APIC bus - * silence for 1 second, this overestimates the time the - * boot CPU is spending to send the up to 2 STARTUP IPIs - * by a factor of two. This should be enough. - */ - - /* - * Waiting 2s total for startup (udelay is not yet working) - */ - timeout = jiffies + 2*HZ; - while (time_before(jiffies, timeout)) { - /* - * Has the boot CPU finished it's STARTUP sequence? - */ - if (cpu_isset(cpuid, cpu_callout_map)) - break; - cpu_relax(); - } - - if (!time_before(jiffies, timeout)) { - printk("BUG: CPU%d started up but did not get a callout!\n", - cpuid); - BUG(); - } - - /* - * the boot CPU has finished the init stage and is spinning - * on callin_map until we finish. We are free to set up this - * CPU, first the APIC. (this is probably redundant on most - * boards) - */ - - Dprintk("CALLIN, before setup_local_APIC().\n"); - smp_callin_clear_local_apic(); - setup_local_APIC(); - end_local_APIC_setup(); - map_cpu_to_logical_apicid(); - - /* - * Get our bogomips. - */ - local_irq_enable(); - calibrate_delay(); - local_irq_disable(); - Dprintk("Stack at about %p\n",&cpuid); - - /* - * Save our processor parameters - */ - smp_store_cpu_info(cpuid); - - /* - * Allow the master to continue. - */ - cpu_set(cpuid, cpu_callin_map); -} +extern void smp_callin(void); /* * Activate a secondary processor. */ -static void __cpuinit start_secondary(void *unused) +void __cpuinit start_secondary(void *unused) { /* * Don't put *anything* before cpu_init(), SMP booting is too @@ -257,373 +155,6 @@ void __devinit initialize_secondary(void) :"m" (current->thread.sp),"m" (current->thread.ip)); } -static inline void __inquire_remote_apic(int apicid) -{ - unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; - char *names[] = { "ID", "VERSION", "SPIV" }; - int timeout; - u32 status; - - printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); - - for (i = 0; i < ARRAY_SIZE(regs); i++) { - printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); - - /* - * Wait for idle. 
- */ - status = safe_apic_wait_icr_idle(); - if (status) - printk(KERN_CONT - "a previous APIC delivery may have failed\n"); - - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); - apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); - - timeout = 0; - do { - udelay(100); - status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; - } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); - - switch (status) { - case APIC_ICR_RR_VALID: - status = apic_read(APIC_RRR); - printk(KERN_CONT "%08x\n", status); - break; - default: - printk(KERN_CONT "failed\n"); - } - } -} - -#ifdef WAKE_SECONDARY_VIA_NMI -/* - * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal - * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this - * won't ... remember to clear down the APIC, etc later. - */ -static int __devinit -wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) -{ - unsigned long send_status, accept_status = 0; - int maxlvt; - - /* Target chip */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); - - /* Boot on the stack */ - /* Kick the second */ - apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - /* - * Give the other CPU some time to accept the IPI. - */ - udelay(200); - /* - * Due to the Pentium erratum 3AP. - */ - maxlvt = lapic_get_maxlvt(); - if (maxlvt > 3) { - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - } - accept_status = (apic_read(APIC_ESR) & 0xEF); - Dprintk("NMI sent.\n"); - - if (send_status) - printk("APIC never delivered???\n"); - if (accept_status) - printk("APIC delivery error (%lx).\n", accept_status); - - return (send_status | accept_status); -} -#endif /* WAKE_SECONDARY_VIA_NMI */ - -#ifdef WAKE_SECONDARY_VIA_INIT -static int __devinit -wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) -{ - unsigned long send_status, accept_status = 0; - int maxlvt, num_starts, j; - - /* - * Be paranoid about clearing APIC errors. - */ - if (APIC_INTEGRATED(apic_version[phys_apicid])) { - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - } - - Dprintk("Asserting INIT.\n"); - - /* - * Turn INIT on target chip - */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* - * Send IPI - */ - apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT - | APIC_DM_INIT); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - mdelay(10); - - Dprintk("Deasserting INIT.\n"); - - /* Target chip */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* Send IPI */ - apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - mb(); - atomic_set(&init_deasserted, 1); - - /* - * Should we send STARTUP IPIs ? - * - * Determine this based on the APIC version. - * If we don't have an integrated APIC, don't send the STARTUP IPIs. - */ - if (APIC_INTEGRATED(apic_version[phys_apicid])) - num_starts = 2; - else - num_starts = 0; - - /* - * Paravirt / VMI wants a startup IPI hook here to set up the - * target processor state. - */ - startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, - (unsigned long) stack_start.sp); - - /* - * Run STARTUP IPI loop. 
- */ - Dprintk("#startup loops: %d.\n", num_starts); - - maxlvt = lapic_get_maxlvt(); - - for (j = 1; j <= num_starts; j++) { - Dprintk("Sending STARTUP #%d.\n",j); - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - Dprintk("After apic_write.\n"); - - /* - * STARTUP IPI - */ - - /* Target chip */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* Boot on the stack */ - /* Kick the second */ - apic_write_around(APIC_ICR, APIC_DM_STARTUP - | (start_eip >> 12)); - - /* - * Give the other CPU some time to accept the IPI. - */ - udelay(300); - - Dprintk("Startup point 1.\n"); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - /* - * Give the other CPU some time to accept the IPI. - */ - udelay(200); - /* - * Due to the Pentium erratum 3AP. - */ - if (maxlvt > 3) { - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - } - accept_status = (apic_read(APIC_ESR) & 0xEF); - if (send_status || accept_status) - break; - } - Dprintk("After Startup.\n"); - - if (send_status) - printk("APIC never delivered???\n"); - if (accept_status) - printk("APIC delivery error (%lx).\n", accept_status); - - return (send_status | accept_status); -} -#endif /* WAKE_SECONDARY_VIA_INIT */ - -extern cpumask_t cpu_initialized; - -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - int cpu; -}; - -static void __cpuinit do_fork_idle(struct work_struct *work) -{ - struct create_idle *c_idle = - container_of(work, struct create_idle, work); - - c_idle->idle = fork_idle(c_idle->cpu); - complete(&c_idle->done); -} -static int __cpuinit do_boot_cpu(int apicid, int cpu) -/* - * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad - * (ie clustered apic addressing mode), this is a LOGICAL apic ID. - * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. - */ -{ - unsigned long boot_error = 0; - int timeout; - unsigned long start_eip; - unsigned short nmi_high = 0, nmi_low = 0; - struct create_idle c_idle = { - .cpu = cpu, - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), - }; - INIT_WORK(&c_idle.work, do_fork_idle); - - alternatives_smp_switch(1); - - c_idle.idle = get_idle_for_cpu(cpu); - - /* - * We can't use kernel_thread since we must avoid to - * reschedule the child. - */ - if (c_idle.idle) { - c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) - (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); - init_idle(c_idle.idle, cpu); - goto do_rest; - } - - if (!keventd_up() || current_is_keventd()) - c_idle.work.func(&c_idle.work); - else { - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - } - - if (IS_ERR(c_idle.idle)) { - printk(KERN_ERR "failed fork for CPU %d\n", cpu); - return PTR_ERR(c_idle.idle); - } - - set_idle_for_cpu(cpu, c_idle.idle); -do_rest: - per_cpu(current_task, cpu) = c_idle.idle; - init_gdt(cpu); - early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); - - c_idle.idle->thread.ip = (unsigned long) start_secondary; - /* start_eip had better be page-aligned! */ - start_eip = setup_trampoline(); - - /* So we see what's up */ - printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); - /* Stack for startup_32 can be just as for start_secondary onwards */ - stack_start.sp = (void *) c_idle.idle->thread.sp; - - irq_ctx_init(cpu); - - /* - * This grunge runs the startup process for - * the targeted processor. 
- */ - - atomic_set(&init_deasserted, 0); - - Dprintk("Setting warm reset code and vector.\n"); - - store_NMI_vector(&nmi_high, &nmi_low); - - smpboot_setup_warm_reset_vector(start_eip); - /* - * Be paranoid about clearing APIC errors. - */ - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - - - /* - * Starting actual IPI sequence... - */ - boot_error = wakeup_secondary_cpu(apicid, start_eip); - - if (!boot_error) { - /* - * allow APs to start initializing. - */ - Dprintk("Before Callout %d.\n", cpu); - cpu_set(cpu, cpu_callout_map); - Dprintk("After Callout %d.\n", cpu); - - /* - * Wait 5s total for a response - */ - for (timeout = 0; timeout < 50000; timeout++) { - if (cpu_isset(cpu, cpu_callin_map)) - break; /* It has booted */ - udelay(100); - } - - if (cpu_isset(cpu, cpu_callin_map)) { - /* number CPUs logically, starting from 1 (BSP is 0) */ - Dprintk("OK.\n"); - printk("CPU%d: ", cpu); - print_cpu_info(&cpu_data(cpu)); - Dprintk("CPU has booted.\n"); - } else { - boot_error= 1; - if (*((volatile unsigned char *)trampoline_base) - == 0xA5) - /* trampoline started but...? */ - printk("Stuck ??\n"); - else - /* trampoline code not run */ - printk("Not responding.\n"); - inquire_remote_apic(apicid); - } - } - - if (boot_error) { - /* Try to put things back the way they were before ... */ - unmap_cpu_to_logical_apicid(cpu); - cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ - cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ - cpu_clear(cpu, cpu_possible_map); - per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; - } - - /* mark "stuck" area as not stuck */ - *((volatile unsigned long *)trampoline_base) = 0; - - return boot_error; -} - #ifdef CONFIG_HOTPLUG_CPU void cpu_exit_clear(void) { @@ -774,65 +305,6 @@ void __init native_smp_prepare_boot_cpu(void) __get_cpu_var(cpu_state) = CPU_ONLINE; } -int __cpuinit native_cpu_up(unsigned int cpu) -{ - int apicid = cpu_present_to_apicid(cpu); - unsigned long flags; - int err; - - WARN_ON(irqs_disabled()); - - Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); - - if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || - !physid_isset(apicid, phys_cpu_present_map)) { - printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); - return -EINVAL; - } - - /* - * Already booted CPU? - */ - if (cpu_isset(cpu, cpu_callin_map)) { - Dprintk("do_boot_cpu %d Already started\n", cpu); - return -ENOSYS; - } - - /* - * Save current MTRR state in case it was changed since early boot - * (e.g. 
by the ACPI SMI) to initialize new CPUs with MTRRs in sync: - */ - mtrr_save_state(); - - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - - /* init low mem mapping */ - clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, - min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); - flush_tlb_all(); - - err = do_boot_cpu(apicid, cpu); - if (err < 0) { - Dprintk("do_boot_cpu failed %d\n", err); - return err; - } - - /* - * Check TSC synchronization with the AP (keep irqs disabled - * while doing so): - */ - local_irq_save(flags); - check_tsc_sync_source(cpu); - local_irq_restore(flags); - - while (!cpu_isset(cpu, cpu_online_map)) { - cpu_relax(); - touch_nmi_watchdog(); - } - - return 0; -} - extern void impress_friends(void); extern void smp_checks(void); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 420ae4a33548..71f13b15bd89 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -71,119 +71,7 @@ int smp_threads_ready; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -/* - * Store all idle threads, this can be reused instead of creating - * a new thread. Also avoids complicated thread destroy functionality - * for idle threads. - */ -#ifdef CONFIG_HOTPLUG_CPU -/* - * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is - * removed after init for !CONFIG_HOTPLUG_CPU. - */ -static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); -#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) -#define set_idle_for_cpu(x,p) (per_cpu(idle_thread_array, x) = (p)) -#else -struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; -#define get_idle_for_cpu(x) (idle_thread_array[(x)]) -#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) -#endif - -static atomic_t init_deasserted __cpuinitdata; - -#define smp_callin_clear_local_apic() do {} while (0) -#define map_cpu_to_logical_apicid() do {} while (0) - -/* - * Report back to the Boot Processor. - * Running on AP. - */ -void __cpuinit smp_callin(void) -{ - int cpuid, phys_id; - unsigned long timeout; - - /* - * If waken up by an INIT in an 82489DX configuration - * we may get here before an INIT-deassert IPI reaches - * our local APIC. We have to wait for the IPI or we'll - * lock up on an APIC access. - */ - wait_for_init_deassert(&init_deasserted); - - /* - * (This works even if the APIC is not enabled.) - */ - phys_id = GET_APIC_ID(apic_read(APIC_ID)); - cpuid = smp_processor_id(); - if (cpu_isset(cpuid, cpu_callin_map)) { - panic("smp_callin: phys CPU#%d, CPU#%d already present??\n", - phys_id, cpuid); - } - Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); - - /* - * STARTUP IPIs are fragile beasts as they might sometimes - * trigger some glue motherboard logic. Complete APIC bus - * silence for 1 second, this overestimates the time the - * boot CPU is spending to send the up to 2 STARTUP IPIs - * by a factor of two. This should be enough. - */ - - /* - * Waiting 2s total for startup (udelay is not yet working) - */ - timeout = jiffies + 2*HZ; - while (time_before(jiffies, timeout)) { - /* - * Has the boot CPU finished it's STARTUP sequence? - */ - if (cpu_isset(cpuid, cpu_callout_map)) - break; - cpu_relax(); - } - - if (!time_before(jiffies, timeout)) { - panic("smp_callin: CPU%d started up but did not get a callout!\n", - cpuid); - } - - /* - * the boot CPU has finished the init stage and is spinning - * on callin_map until we finish. We are free to set up this - * CPU, first the APIC. 
(this is probably redundant on most - * boards) - */ - - Dprintk("CALLIN, before setup_local_APIC().\n"); - smp_callin_clear_local_apic(); - setup_local_APIC(); - end_local_APIC_setup(); - map_cpu_to_logical_apicid(); - - /* - * Get our bogomips. - * - * Need to enable IRQs because it can take longer and then - * the NMI watchdog might kill us. - */ - local_irq_enable(); - calibrate_delay(); - local_irq_disable(); - Dprintk("Stack at about %p\n",&cpuid); - - /* - * Save our processor parameters - */ - smp_store_cpu_info(cpuid); - - /* - * Allow the master to continue. - */ - cpu_set(cpuid, cpu_callin_map); -} - +extern void smp_callin(void); /* * Setup code on secondary processor (after comming out of the trampoline) */ @@ -246,349 +134,6 @@ void __cpuinit start_secondary(void) cpu_idle(); } -extern volatile unsigned long init_rsp; -extern void (*initial_code)(void); - -#ifdef APIC_DEBUG -static void __inquire_remote_apic(int apicid) -{ - unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; - char *names[] = { "ID", "VERSION", "SPIV" }; - int timeout; - u32 status; - - printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); - - for (i = 0; i < ARRAY_SIZE(regs); i++) { - printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); - - /* - * Wait for idle. - */ - status = safe_apic_wait_icr_idle(); - if (status) - printk(KERN_CONT - "a previous APIC delivery may have failed\n"); - - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); - apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); - - timeout = 0; - do { - udelay(100); - status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; - } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); - - switch (status) { - case APIC_ICR_RR_VALID: - status = apic_read(APIC_RRR); - printk(KERN_CONT "%08x\n", status); - break; - default: - printk(KERN_CONT "failed\n"); - } - } -} -#endif - -/* - * Kick the secondary to wake up. - */ -static int __cpuinit wakeup_secondary_cpu(int phys_apicid, - unsigned int start_rip) -{ - unsigned long send_status, accept_status = 0; - int maxlvt, num_starts, j; - - /* - * Be paranoid about clearing APIC errors. - */ - if (APIC_INTEGRATED(apic_version[phys_apicid])) { - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - } - - Dprintk("Asserting INIT.\n"); - - /* - * Turn INIT on target chip - */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* - * Send IPI - */ - apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT - | APIC_DM_INIT); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - mdelay(10); - - Dprintk("Deasserting INIT.\n"); - - /* Target chip */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* Send IPI */ - apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - mb(); - atomic_set(&init_deasserted, 1); - - if (APIC_INTEGRATED(apic_version[phys_apicid])) - num_starts = 2; - else - num_starts = 0; - - /* - * Paravirt / VMI wants a startup IPI hook here to set up the - * target processor state. - */ - startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, - (unsigned long) init_rsp); - - - /* - * Run STARTUP IPI loop. 
- */ - Dprintk("#startup loops: %d.\n", num_starts); - - maxlvt = lapic_get_maxlvt(); - - for (j = 1; j <= num_starts; j++) { - Dprintk("Sending STARTUP #%d.\n",j); - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - Dprintk("After apic_write.\n"); - - /* - * STARTUP IPI - */ - - /* Target chip */ - apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); - - /* Boot on the stack */ - /* Kick the second */ - apic_write_around(APIC_ICR, APIC_DM_STARTUP | (start_rip>>12)); - - /* - * Give the other CPU some time to accept the IPI. - */ - udelay(300); - - Dprintk("Startup point 1.\n"); - - Dprintk("Waiting for send to finish...\n"); - send_status = safe_apic_wait_icr_idle(); - - /* - * Give the other CPU some time to accept the IPI. - */ - udelay(200); - /* - * Due to the Pentium erratum 3AP. - */ - if (maxlvt > 3) { - apic_read_around(APIC_SPIV); - apic_write(APIC_ESR, 0); - } - accept_status = (apic_read(APIC_ESR) & 0xEF); - if (send_status || accept_status) - break; - } - Dprintk("After Startup.\n"); - - if (send_status) - printk(KERN_ERR "APIC never delivered???\n"); - if (accept_status) - printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); - - return (send_status | accept_status); -} - -struct create_idle { - struct work_struct work; - struct task_struct *idle; - struct completion done; - int cpu; -}; - -static void __cpuinit do_fork_idle(struct work_struct *work) -{ - struct create_idle *c_idle = - container_of(work, struct create_idle, work); - - c_idle->idle = fork_idle(c_idle->cpu); - complete(&c_idle->done); -} - -/* - * Boot one CPU. - */ -static int __cpuinit do_boot_cpu(int cpu, int apicid) -{ - unsigned long boot_error = 0; - int timeout; - unsigned long start_rip; - struct create_idle c_idle = { - .cpu = cpu, - .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), - }; - INIT_WORK(&c_idle.work, do_fork_idle); - - /* allocate memory for gdts of secondary cpus. Hotplug is considered */ - if (!cpu_gdt_descr[cpu].address && - !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { - printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu); - return -1; - } - - /* Allocate node local memory for AP pdas */ - if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { - struct x8664_pda *newpda, *pda; - int node = cpu_to_node(cpu); - pda = cpu_pda(cpu); - newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC, - node); - if (newpda) { - memcpy(newpda, pda, sizeof (struct x8664_pda)); - cpu_pda(cpu) = newpda; - } else - printk(KERN_ERR - "Could not allocate node local PDA for CPU %d on node %d\n", - cpu, node); - } - - alternatives_smp_switch(1); - - c_idle.idle = get_idle_for_cpu(cpu); - - if (c_idle.idle) { - c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) - (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); - init_idle(c_idle.idle, cpu); - goto do_rest; - } - - /* - * During cold boot process, keventd thread is not spun up yet. - * When we do cpu hot-add, we create idle threads on the fly, we should - * not acquire any attributes from the calling context. Hence the clean - * way to create kernel_threads() is to do that from keventd(). - * We do the current_is_keventd() due to the fact that ACPI notifier - * was also queuing to keventd() and when the caller is already running - * in context of keventd(), we would end up with locking up the keventd - * thread. 
- */ - if (!keventd_up() || current_is_keventd()) - c_idle.work.func(&c_idle.work); - else { - schedule_work(&c_idle.work); - wait_for_completion(&c_idle.done); - } - - if (IS_ERR(c_idle.idle)) { - printk("failed fork for CPU %d\n", cpu); - return PTR_ERR(c_idle.idle); - } - - set_idle_for_cpu(cpu, c_idle.idle); - -do_rest: - - cpu_pda(cpu)->pcurrent = c_idle.idle; - - start_rip = setup_trampoline(); - - init_rsp = c_idle.idle->thread.sp; - load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread); - initial_code = start_secondary; - clear_tsk_thread_flag(c_idle.idle, TIF_FORK); - - printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu, - cpus_weight(cpu_present_map), - apicid); - - /* - * This grunge runs the startup process for - * the targeted processor. - */ - - atomic_set(&init_deasserted, 0); - - Dprintk("Setting warm reset code and vector.\n"); - - smpboot_setup_warm_reset_vector(start_rip); - /* - * Be paranoid about clearing APIC errors. - */ - apic_write(APIC_ESR, 0); - apic_read(APIC_ESR); - - /* - * Starting actual IPI sequence... - */ - boot_error = wakeup_secondary_cpu(apicid, start_rip); - - if (!boot_error) { - /* - * allow APs to start initializing. - */ - Dprintk("Before Callout %d.\n", cpu); - cpu_set(cpu, cpu_callout_map); - Dprintk("After Callout %d.\n", cpu); - - /* - * Wait 5s total for a response - */ - for (timeout = 0; timeout < 50000; timeout++) { - if (cpu_isset(cpu, cpu_callin_map)) - break; /* It has booted */ - udelay(100); - } - - if (cpu_isset(cpu, cpu_callin_map)) { - /* number CPUs logically, starting from 1 (BSP is 0) */ - Dprintk("CPU has booted.\n"); - printk(KERN_INFO "CPU%d: ", cpu); - print_cpu_info(&cpu_data(cpu)); - } else { - boot_error = 1; - if (*((volatile unsigned char *)trampoline_base) - == 0xA5) - /* trampoline started but...? */ - printk("Stuck ??\n"); - else - /* trampoline code not run */ - printk("Not responding.\n"); -#ifdef APIC_DEBUG - inquire_remote_apic(apicid); -#endif - } - } - if (boot_error) { - cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ - clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */ - clear_node_cpumask(cpu); /* was set by numa_add_cpu */ - cpu_clear(cpu, cpu_present_map); - cpu_clear(cpu, cpu_possible_map); - per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; - } - - /* mark "stuck" area as not stuck */ - *((volatile unsigned long *)trampoline_base) = 0; - - return boot_error; -} - cycles_t cacheflush_time; unsigned long cache_decay_ticks; @@ -745,64 +290,6 @@ void __init native_smp_prepare_boot_cpu(void) per_cpu(cpu_state, me) = CPU_ONLINE; } -/* - * Entry point to boot a CPU. - */ -int __cpuinit native_cpu_up(unsigned int cpu) -{ - int apicid = cpu_present_to_apicid(cpu); - unsigned long flags; - int err; - - WARN_ON(irqs_disabled()); - - Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); - - if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || - !physid_isset(apicid, phys_cpu_present_map)) { - printk("__cpu_up: bad cpu %d\n", cpu); - return -EINVAL; - } - - /* - * Already booted CPU? - */ - if (cpu_isset(cpu, cpu_callin_map)) { - Dprintk("do_boot_cpu %d Already started\n", cpu); - return -ENOSYS; - } - - /* - * Save current MTRR state in case it was changed since early boot - * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: - */ - mtrr_save_state(); - - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - /* Boot it! 
*/ - err = do_boot_cpu(cpu, apicid); - if (err < 0) { - Dprintk("do_boot_cpu failed %d\n", err); - return err; - } - - /* Unleash the CPU! */ - Dprintk("waiting for cpu %d\n", cpu); - - /* - * Make sure and check TSC sync: - */ - local_irq_save(flags); - check_tsc_sync_source(cpu); - local_irq_restore(flags); - - while (!cpu_isset(cpu, cpu_online_map)) - cpu_relax(); - err = 0; - - return err; -} - extern void impress_friends(void); extern void smp_checks(void); diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 2ad2f4ffe498..ef26911dc22a 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -9,6 +9,7 @@ extern cpumask_t cpu_callout_map; extern int smp_num_siblings; extern unsigned int num_processors; +extern cpumask_t cpu_initialized; extern u16 x86_cpu_to_apicid_init[]; extern u16 x86_bios_cpu_apicid_init[]; @@ -34,6 +35,8 @@ extern struct { unsigned short ss; } stack_start; +extern unsigned long init_rsp; +extern unsigned long initial_code; struct smp_ops { void (*smp_prepare_boot_cpu)(void); -- cgit v1.2.1 From bbc2ff6a91a4eef8030018cd389bb12352d11b34 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:00 -0300 Subject: x86: integrate start_secondary It now looks the same between architectures, so we merge it in smpboot.c. Minor differences goes inside an ifdef Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 86 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/kernel/smpboot_32.c | 75 -------------------------------------- arch/x86/kernel/smpboot_64.c | 63 -------------------------------- 3 files changed, 85 insertions(+), 139 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 69c17965f48d..a36ae2785c48 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -229,6 +230,90 @@ void __cpuinit smp_callin(void) cpu_set(cpuid, cpu_callin_map); } +/* + * Activate a secondary processor. + */ +void __cpuinit start_secondary(void *unused) +{ + /* + * Don't put *anything* before cpu_init(), SMP booting is too + * fragile that we want to limit the things done here to the + * most necessary things. + */ +#ifdef CONFIG_VMI + vmi_bringup(); +#endif + cpu_init(); + preempt_disable(); + smp_callin(); + + /* otherwise gcc will move up smp_processor_id before the cpu_init */ + barrier(); + /* + * Check TSC synchronization with the BP: + */ + check_tsc_sync_target(); + + if (nmi_watchdog == NMI_IO_APIC) { + disable_8259A_irq(0); + enable_NMI_through_LVT0(); + enable_8259A_irq(0); + } + + /* This must be done before setting cpu_online_map */ + set_cpu_sibling_map(raw_smp_processor_id()); + wmb(); + + /* + * We need to hold call_lock, so there is no inconsistency + * between the time smp_call_function() determines number of + * IPI recipients, and the time when the determination is made + * for which cpus receive the IPI. Holding this + * lock helps us to not include this cpu in a currently in progress + * smp_call_function(). + */ + lock_ipi_call_lock(); +#ifdef CONFIG_X86_64 + spin_lock(&vector_lock); + + /* Setup the per cpu irq handling data structures */ + __setup_vector_irq(smp_processor_id()); + /* + * Allow the master to continue. 
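+ *
+ * (Per-cpu vector setup is complete, so vector_lock can be released
+ * before this CPU marks itself online below.)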
+ */ + spin_unlock(&vector_lock); +#endif + cpu_set(smp_processor_id(), cpu_online_map); + unlock_ipi_call_lock(); + per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; + + setup_secondary_clock(); + + wmb(); + cpu_idle(); +} + +#ifdef CONFIG_X86_32 +/* + * Everything has been set up for the secondary + * CPUs - they just need to reload everything + * from the task structure + * This function must not return. + */ +void __devinit initialize_secondary(void) +{ + /* + * We don't actually need to load the full TSS, + * basically just the stack pointer and the ip. + */ + + asm volatile( + "movl %0,%%esp\n\t" + "jmp *%1" + : + :"m" (current->thread.sp), "m" (current->thread.ip)); +} +#endif static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) { @@ -533,7 +618,6 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) } #endif /* WAKE_SECONDARY_VIA_NMI */ -extern void start_secondary(void *unused); #ifdef WAKE_SECONDARY_VIA_INIT static int __devinit wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index e82eeb2fdfef..77b045cfebd4 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -80,81 +80,6 @@ extern void unmap_cpu_to_logical_apicid(int cpu); /* State of each CPU. */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -extern void smp_callin(void); - -/* - * Activate a secondary processor. - */ -void __cpuinit start_secondary(void *unused) -{ - /* - * Don't put *anything* before cpu_init(), SMP booting is too - * fragile that we want to limit the things done here to the - * most necessary things. - */ -#ifdef CONFIG_VMI - vmi_bringup(); -#endif - cpu_init(); - preempt_disable(); - smp_callin(); - - /* otherwise gcc will move up smp_processor_id before the cpu_init */ - barrier(); - /* - * Check TSC synchronization with the BP: - */ - check_tsc_sync_target(); - - if (nmi_watchdog == NMI_IO_APIC) { - disable_8259A_irq(0); - enable_NMI_through_LVT0(); - enable_8259A_irq(0); - } - - /* This must be done before setting cpu_online_map */ - set_cpu_sibling_map(raw_smp_processor_id()); - wmb(); - - /* - * We need to hold call_lock, so there is no inconsistency - * between the time smp_call_function() determines number of - * IPI recipients, and the time when the determination is made - * for which cpus receive the IPI. Holding this - * lock helps us to not include this cpu in a currently in progress - * smp_call_function(). - */ - lock_ipi_call_lock(); - cpu_set(smp_processor_id(), cpu_online_map); - unlock_ipi_call_lock(); - per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; - - setup_secondary_clock(); - - wmb(); - cpu_idle(); -} - -/* - * Everything has been set up for the secondary - * CPUs - they just need to reload everything - * from the task structure - * This function must not return. - */ -void __devinit initialize_secondary(void) -{ - /* - * We don't actually need to load the full TSS, - * basically just the stack pointer and the ip. 
- */ - - asm volatile( - "movl %0,%%esp\n\t" - "jmp *%1" - : - :"m" (current->thread.sp),"m" (current->thread.ip)); -} - #ifdef CONFIG_HOTPLUG_CPU void cpu_exit_clear(void) { diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 71f13b15bd89..60cd8cf1b073 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -71,69 +71,6 @@ int smp_threads_ready; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -extern void smp_callin(void); -/* - * Setup code on secondary processor (after comming out of the trampoline) - */ -void __cpuinit start_secondary(void) -{ - /* - * Dont put anything before smp_callin(), SMP - * booting is too fragile that we want to limit the - * things done here to the most necessary things. - */ - cpu_init(); - preempt_disable(); - smp_callin(); - - /* otherwise gcc will move up the smp_processor_id before the cpu_init */ - barrier(); - - /* - * Check TSC sync first: - */ - check_tsc_sync_target(); - - if (nmi_watchdog == NMI_IO_APIC) { - disable_8259A_irq(0); - enable_NMI_through_LVT0(); - enable_8259A_irq(0); - } - - /* - * The sibling maps must be set before turing the online map on for - * this cpu - */ - set_cpu_sibling_map(smp_processor_id()); - - /* - * We need to hold call_lock, so there is no inconsistency - * between the time smp_call_function() determines number of - * IPI recipients, and the time when the determination is made - * for which cpus receive the IPI in genapic_flat.c. Holding this - * lock helps us to not include this cpu in a currently in progress - * smp_call_function(). - */ - lock_ipi_call_lock(); - spin_lock(&vector_lock); - - /* Setup the per cpu irq handling data structures */ - __setup_vector_irq(smp_processor_id()); - /* - * Allow the master to continue. - */ - spin_unlock(&vector_lock); - cpu_set(smp_processor_id(), cpu_online_map); - unlock_ipi_call_lock(); - - per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; - - setup_secondary_clock(); - - wmb(); - cpu_idle(); -} - cycles_t cacheflush_time; unsigned long cache_decay_ticks; -- cgit v1.2.1 From a8db8453ff52609b14716361651ad10d2ab66682 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:01 -0300 Subject: x86: merge smp_prepare_boot_cpu it is practically the same between arches now, so it is moved to smpboot.c. Minor differences (gdt initialization) live inside an ifdef Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 18 ++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 14 -------------- arch/x86/kernel/smpboot_64.c | 14 -------------- 3 files changed, 18 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a36ae2785c48..b214d8dcc07a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -24,6 +24,9 @@ #include #include +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + /* Store all idle threads, this can be reused instead of creating * a new thread. Also avoids complicated thread destroy functionality * for idle threads. @@ -999,6 +1002,21 @@ int __cpuinit native_cpu_up(unsigned int cpu) return 0; } +/* + * Early setup to make printk work. 
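+	 * (On 32-bit, the init_gdt()/switch_to_new_gdt() pair below loads
+	 * this CPU's per-CPU GDT entry so that smp_processor_id() and
+	 * per-CPU data work this early -- presumably what the old comment
+	 * about printk refers to; inferred from the code, not stated here.)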
+ */ +void __init native_smp_prepare_boot_cpu(void) +{ + int me = smp_processor_id(); +#ifdef CONFIG_X86_32 + init_gdt(me); + switch_to_new_gdt(); +#endif + /* already set me in cpu_online_map in boot_cpu_init() */ + cpu_set(me, cpu_callout_map); + per_cpu(cpu_state, me) = CPU_ONLINE; +} + #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 77b045cfebd4..5d27b1db6c26 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -77,9 +77,6 @@ u8 apicid_2_node[MAX_APICID]; extern void map_cpu_to_logical_apicid(void); extern void unmap_cpu_to_logical_apicid(int cpu); -/* State of each CPU. */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; - #ifdef CONFIG_HOTPLUG_CPU void cpu_exit_clear(void) { @@ -219,17 +216,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) smp_boot_cpus(max_cpus); } -void __init native_smp_prepare_boot_cpu(void) -{ - unsigned int cpu = smp_processor_id(); - - init_gdt(cpu); - switch_to_new_gdt(); - - cpu_set(cpu, cpu_callout_map); - __get_cpu_var(cpu_state) = CPU_ONLINE; -} - extern void impress_friends(void); extern void smp_checks(void); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 60cd8cf1b073..f77299b0639e 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -68,9 +68,6 @@ /* Set when the idlers are all forked */ int smp_threads_ready; -/* State of each CPU */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; - cycles_t cacheflush_time; unsigned long cache_decay_ticks; @@ -216,17 +213,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) print_cpu_info(&cpu_data(0)); } -/* - * Early setup to make printk work. - */ -void __init native_smp_prepare_boot_cpu(void) -{ - int me = smp_processor_id(); - /* already set me in cpu_online_map in boot_cpu_init() */ - cpu_set(me, cpu_callout_map); - per_cpu(cpu_state, me) = CPU_ONLINE; -} - extern void impress_friends(void); extern void smp_checks(void); -- cgit v1.2.1 From 83f7eb9c674c1bcaad6ca258fdd7dd3b96465a62 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:02 -0300 Subject: x86: merge native_smp_cpus_done They look similar enough, and are merged. Only difference (zap_low_mapping for i386) is inside ifdef Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 21 ++++++++++++++++++++- arch/x86/kernel/smpboot_32.c | 21 --------------------- arch/x86/kernel/smpboot_64.c | 18 ------------------ 3 files changed, 20 insertions(+), 40 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b214d8dcc07a..26118b4a1c38 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -880,7 +880,6 @@ do_rest: apic_write(APIC_ESR, 0); apic_read(APIC_ESR); - /* * Starting actual IPI sequence... */ @@ -1017,6 +1016,26 @@ void __init native_smp_prepare_boot_cpu(void) per_cpu(cpu_state, me) = CPU_ONLINE; } +void __init native_smp_cpus_done(unsigned int max_cpus) +{ + /* + * Cleanup possible dangling ends... 
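+	 * (The "dangling ends" are the warm-reset trampoline: on
+	 * mach-default, smpboot_restore_warm_reset_vector() clears the
+	 * CMOS shutdown code and the 40:67 warm-reset vector used to start
+	 * the APs -- inferred from smpboot_hooks.h, not spelled out here.)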
+ */ + smpboot_restore_warm_reset_vector(); + + Dprintk("Boot done.\n"); + + impress_friends(); + smp_checks(); +#ifdef CONFIG_X86_IO_APIC + setup_ioapic_dest(); +#endif + check_nmi_watchdog(); +#ifdef CONFIG_X86_32 + zap_low_mappings(); +#endif +} + #ifdef CONFIG_HOTPLUG_CPU void remove_siblinginfo(int cpu) { diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5d27b1db6c26..75fb5064af66 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -215,24 +215,3 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) mb(); smp_boot_cpus(max_cpus); } - -extern void impress_friends(void); -extern void smp_checks(void); - -void __init native_smp_cpus_done(unsigned int max_cpus) -{ - /* - * Cleanup possible dangling ends... - */ - smpboot_restore_warm_reset_vector(); - - Dprintk("Boot done.\n"); - - impress_friends(); - smp_checks(); -#ifdef CONFIG_X86_IO_APIC - setup_ioapic_dest(); -#endif - check_nmi_watchdog(); - zap_low_mappings(); -} diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index f77299b0639e..f4363a38d079 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -212,21 +212,3 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) printk(KERN_INFO "CPU%d: ", 0); print_cpu_info(&cpu_data(0)); } - -extern void impress_friends(void); -extern void smp_checks(void); - -/* - * Finish the SMP boot. - */ -void __init native_smp_cpus_done(unsigned int max_cpus) -{ - smpboot_restore_warm_reset_vector(); - - Dprintk("Boot done.\n"); - - impress_friends(); - smp_checks(); - setup_ioapic_dest(); - check_nmi_watchdog(); -} -- cgit v1.2.1 From bd7b47ba5e4ced4e20bed2394c9580637d44550a Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:03 -0300 Subject: x86: use physical id when disabling smp if smp configuration is not found at all, hook into 0. This is done to match x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 75fb5064af66..14db038b6b48 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -106,7 +106,11 @@ static void __init disable_smp(void) cpu_possible_map = cpumask_of_cpu(0); cpu_present_map = cpumask_of_cpu(0); smpboot_clear_io_apic_irqs(); - phys_cpu_present_map = physid_mask_of_physid(0); + if (smp_found_config) + phys_cpu_present_map = + physid_mask_of_physid(boot_cpu_physical_apicid); + else + phys_cpu_present_map = physid_mask_of_physid(0); map_cpu_to_logical_apicid(); cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0)); -- cgit v1.2.1 From e7bc8fbad4c582639334285dd1d9571578c58674 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:04 -0300 Subject: x86: get rid of smp_boot_cpus This patch get rid of smp_boot_cpus(), since it does not boot any cpu anymore. 
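(After the earlier patches in this series, the actual bring-up is done by
native_cpu_up()/do_boot_cpu(), so what was left here was boot-CPU setup only.)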
Its code is split in a way to make it closer to x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 14db038b6b48..d153d8423740 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -172,21 +172,19 @@ static int __init smp_sanity_check(unsigned max_cpus) return 0; } -/* - * Cycle through the processors sending APIC IPIs to boot each. - */ -static void __init smp_boot_cpus(unsigned int max_cpus) +/* These are wrappers to interface to the new boot process. Someone + who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ +void __init native_smp_prepare_cpus(unsigned int max_cpus) { + nmi_watchdog_default(); + cpu_callin_map = cpumask_of_cpu(0); + mb(); + /* * Setup boot CPU information */ smp_store_cpu_info(0); /* Final full version of the data */ - printk(KERN_INFO "CPU%d: ", 0); - print_cpu_info(&cpu_data(0)); - - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); boot_cpu_logical_apicid = logical_smp_processor_id(); - current_thread_info()->cpu = 0; set_cpu_sibling_map(0); @@ -197,25 +195,19 @@ static void __init smp_boot_cpus(unsigned int max_cpus) return; } + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + connect_bsp_APIC(); setup_local_APIC(); end_local_APIC_setup(); map_cpu_to_logical_apicid(); - setup_portio_remap(); smpboot_setup_io_apic(); + printk(KERN_INFO "CPU%d: ", 0); + print_cpu_info(&cpu_data(0)); setup_boot_clock(); } -/* These are wrappers to interface to the new boot process. Someone - who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ -void __init native_smp_prepare_cpus(unsigned int max_cpus) -{ - nmi_watchdog_default(); - cpu_callin_map = cpumask_of_cpu(0); - mb(); - smp_boot_cpus(max_cpus); -} -- cgit v1.2.1 From 7cefaa20e798c547f569ca3f79547f820c802997 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:05 -0300 Subject: x86: additions to i386 native_smp_prepare_cpus. Add function calls to native_smp_prepare_cpus in i386 to match x86_64 Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index d153d8423740..6be36d3eea4e 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -172,11 +172,23 @@ static int __init smp_sanity_check(unsigned max_cpus) return 0; } -/* These are wrappers to interface to the new boot process. Someone - who understands all this stuff should rewrite it properly. 
--RR 15/Jul/02 */
+static void __init smp_cpu_index_default(void)
+{
+	int i;
+	struct cpuinfo_x86 *c;
+
+	for_each_cpu_mask(i, cpu_possible_map) {
+		c = &cpu_data(i);
+		/* mark all to hotplug */
+		c->cpu_index = NR_CPUS;
+	}
+}
+
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
 	nmi_watchdog_default();
+	smp_cpu_index_default();
+	current_cpu_data = boot_cpu_data;
 	cpu_callin_map = cpumask_of_cpu(0);
 	mb();
@@ -195,7 +207,11 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 		return;
 	}
 
-	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) {
+		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid);
+		/* Or can we switch back to PIC here? */
+	}
 
 	connect_bsp_APIC();
 	setup_local_APIC();
--
cgit v1.2.1

From 3fa7b3487a1317f7d3be3043dbea316ca75abed5 Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:26:06 -0300
Subject: x86: assign nr_ioapics = 0 in smpboot_hooks.h

change smpboot_setup_io_apic() to match x86_64 behaviour

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 include/asm-x86/mach-default/smpboot_hooks.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 7f45f6311059..8e1c6c06d057 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -41,4 +41,6 @@ static inline void smpboot_setup_io_apic(void)
 	 */
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
+	else
+		nr_ioapics = 0;
 }
--
cgit v1.2.1

From 1db17f553425ae679bc771d4796b7349f00cb1d9 Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:26:07 -0300
Subject: x86: change x86_64 native_smp_prepare_cpus to match i386

An APIC test is moved, and code is replaced by the function that
mach-default already defines (smpboot_setup_io_apic).
setup_portio_remap() is added, but it is a nop in mach-default.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot_64.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index f4363a38d079..6679ac502160 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -71,6 +71,7 @@ int smp_threads_ready;
 
 cycles_t cacheflush_time;
 unsigned long cache_decay_ticks;
+static int boot_cpu_logical_apicid;
 /*
  * Fall back to non SMP mode after errors.
  *
@@ -167,7 +168,11 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
 	nmi_watchdog_default();
 	smp_cpu_index_default();
+	cpu_callin_map = cpumask_of_cpu(0);
+	mb();
+	current_cpu_data = boot_cpu_data;
+	boot_cpu_logical_apicid = logical_smp_processor_id();
 	current_thread_info()->cpu = 0;  /* needed? */
 	set_cpu_sibling_map(0);
 
@@ -177,6 +182,11 @@
 		return;
 	}
 
+	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) {
+		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid);
+		/* Or can we switch back to PIC here? */
+	}
 	/*
 	 * Switch from PIC to APIC mode.
@@ -190,20 +200,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 		enable_IO_APIC();
 	end_local_APIC_setup();
 
-	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) {
-		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid);
-		/* Or can we switch back to PIC here? */
-	}
-
-	/*
-	 * Now start the IO-APICs
-	 */
-	if (!skip_ioapic_setup && nr_ioapics)
-		setup_IO_APIC();
-	else
-		nr_ioapics = 0;
-
 	/*
 	 * Set up local APIC timer on boot CPU.
 	 */
--
cgit v1.2.1

From 0df1ba8cabc6c2d613921c75e525826e6eb3210b Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:26:08 -0300
Subject: x86: add extra sanity check

This test exists in x86_64 and also applies to i386, so we add it.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot_32.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 6be36d3eea4e..ae23b603978c 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -118,6 +118,12 @@ static void __init disable_smp(void)
 
 static int __init smp_sanity_check(unsigned max_cpus)
 {
+	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
+		printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
+				    "by the BIOS.\n", hard_smp_processor_id());
+		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
+	}
+
 	/*
 	 * If we couldn't find an SMP configuration at boot time,
 	 * get out of here now!
--
cgit v1.2.1

From 771263d31114adb5e234364a58280c876c2ed182 Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:26:09 -0300
Subject: x86: change x86_64 sanity checks to match i386.

They are mostly innocuous: APIC_INTEGRATED will expand to 1,
check_phys_apicid_present is checking for the same thing it was
before, etc. But the code is identical to i386 now, and will allow
us to integrate it.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot_64.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 6679ac502160..c66fb15b0131 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -50,6 +50,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -105,7 +106,7 @@
 	 * If we couldn't find an SMP configuration at boot time,
 	 * get out of here now!
 	 */
-	if (!smp_found_config) {
+	if (!smp_found_config && !acpi_lapic) {
 		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 		disable_smp();
 		if (APIC_init_uniprocessor())
@@ -118,7 +119,7 @@
 	 * Should not be necessary because the MP table should list the boot
 	 * CPU too, but we do it for the sake of robustness anyway.
 	 */
-	if (!physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map)) {
+	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
 		printk(KERN_NOTICE
 		       "weird, boot CPU (#%d) not listed by the BIOS.\n",
 		       boot_cpu_physical_apicid);
@@ -128,7 +129,8 @@
 	/*
 	 * If we couldn't find a local APIC, then get out of here now!
 	 */
-	if (!cpu_has_apic) {
+	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
+	    !cpu_has_apic) {
 		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
 		       boot_cpu_physical_apicid);
 		printk(KERN_ERR "... forcing use of dummy APIC emulation.
(tell your hw vendor)\n"); @@ -136,6 +138,8 @@ static int __init smp_sanity_check(unsigned max_cpus) return -1; } + verify_local_APIC(); + /* * If SMP should be disabled, then really disable it! */ -- cgit v1.2.1 From 9f3734f631267d2f36008833b62670ca342ac000 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:10 -0300 Subject: x86: introduce smpboot_clear_io_apic x86_64 has two nr_ioapics = 0 statements. In 32-bit, it can be done too. We do it through the smpboot_clear_io_apic() inline function, to cope with subarchitectures (visws) that does not compile mpparse in Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot_32.c | 2 ++ arch/x86/kernel/smpboot_64.c | 4 ++-- include/asm-x86/mach-default/smpboot_hooks.h | 5 +++++ include/asm-x86/mach-visws/smpboot_hooks.h | 4 ++++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index ae23b603978c..5a0f57f35191 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -155,6 +155,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); + smpboot_clear_io_apic(); return -1; } @@ -173,6 +174,7 @@ static int __init smp_sanity_check(unsigned max_cpus) setup_local_APIC(); end_local_APIC_setup(); } + smpboot_clear_io_apic(); return -1; } return 0; diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index c66fb15b0131..775244545ffa 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -134,7 +134,7 @@ static int __init smp_sanity_check(unsigned max_cpus) printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); - nr_ioapics = 0; + smpboot_clear_io_apic(); return -1; } @@ -145,7 +145,7 @@ static int __init smp_sanity_check(unsigned max_cpus) */ if (!max_cpus) { printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); - nr_ioapics = 0; + smpboot_clear_io_apic(); return -1; } diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 8e1c6c06d057..3ff2c5bff93a 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h @@ -44,3 +44,8 @@ static inline void smpboot_setup_io_apic(void) else nr_ioapics = 0; } + +static inline void smpboot_clear_io_apic(void) +{ + nr_ioapics = 0; +} diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h index d926471fa359..c9b83e395a2e 100644 --- a/include/asm-x86/mach-visws/smpboot_hooks.h +++ b/include/asm-x86/mach-visws/smpboot_hooks.h @@ -22,3 +22,7 @@ static inline void smpboot_restore_warm_reset_vector(void) static inline void smpboot_setup_io_apic(void) { } + +static inline void smpboot_clear_io_apic(void) +{ +} -- cgit v1.2.1 From 8aef135c73436fa46fdb4dc8aba49d5539dee72d Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:11 -0300 Subject: x86: merge native_smp_prepare_cpus With the previous changes, code for native_smp_prepare_cpus() in i386 and x86_64 now look very similar. merge them into smpboot.c. 
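(As the diff below shows, connect_bsp_APIC() stays under CONFIG_X86_32 and
the enable_IO_APIC() call under CONFIG_X86_64.)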
Minor differences are inside ifdef Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 170 +++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 137 ---------------------------------- arch/x86/kernel/smpboot_64.c | 141 ----------------------------------- 3 files changed, 170 insertions(+), 278 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 26118b4a1c38..45119d39f31e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -75,6 +76,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); static atomic_t init_deasserted; +static int boot_cpu_logical_apicid; + /* ready for x86_64, no harm for x86, since it will overwrite after alloc */ unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE); @@ -1001,6 +1004,173 @@ int __cpuinit native_cpu_up(unsigned int cpu) return 0; } +/* + * Fall back to non SMP mode after errors. + * + * RED-PEN audit/test this more. I bet there is more state messed up here. + */ +static __init void disable_smp(void) +{ + cpu_present_map = cpumask_of_cpu(0); + cpu_possible_map = cpumask_of_cpu(0); +#ifdef CONFIG_X86_32 + smpboot_clear_io_apic_irqs(); +#endif + if (smp_found_config) + phys_cpu_present_map = + physid_mask_of_physid(boot_cpu_physical_apicid); + else + phys_cpu_present_map = physid_mask_of_physid(0); + map_cpu_to_logical_apicid(); + cpu_set(0, per_cpu(cpu_sibling_map, 0)); + cpu_set(0, per_cpu(cpu_core_map, 0)); +} + +/* + * Various sanity checks. + */ +static int __init smp_sanity_check(unsigned max_cpus) +{ + if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { + printk(KERN_WARNING "weird, boot CPU (#%d) not listed" + "by the BIOS.\n", hard_smp_processor_id()); + physid_set(hard_smp_processor_id(), phys_cpu_present_map); + } + + /* + * If we couldn't find an SMP configuration at boot time, + * get out of here now! + */ + if (!smp_found_config && !acpi_lapic) { + printk(KERN_NOTICE "SMP motherboard not detected.\n"); + disable_smp(); + if (APIC_init_uniprocessor()) + printk(KERN_NOTICE "Local APIC not detected." + " Using dummy APIC emulation.\n"); + return -1; + } + + /* + * Should not be necessary because the MP table should list the boot + * CPU too, but we do it for the sake of robustness anyway. + */ + if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { + printk(KERN_NOTICE + "weird, boot CPU (#%d) not listed by the BIOS.\n", + boot_cpu_physical_apicid); + physid_set(hard_smp_processor_id(), phys_cpu_present_map); + } + + /* + * If we couldn't find a local APIC, then get out of here now! + */ + if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && + !cpu_has_apic) { + printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", + boot_cpu_physical_apicid); + printk(KERN_ERR "... forcing use of dummy APIC emulation." + "(tell your hw vendor)\n"); + smpboot_clear_io_apic(); + return -1; + } + + verify_local_APIC(); + + /* + * If SMP should be disabled, then really disable it! 
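+	 * (!max_cpus normally means the user asked for a UP boot, e.g. via
+	 * the maxcpus=0 boot parameter -- an assumption about usage, not
+	 * something the patch itself states.)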
+ */ + if (!max_cpus) { + printk(KERN_INFO "SMP mode deactivated," + "forcing use of dummy APIC emulation.\n"); + smpboot_clear_io_apic(); +#ifdef CONFIG_X86_32 + if (nmi_watchdog == NMI_LOCAL_APIC) { + printk(KERN_INFO "activating minimal APIC for" + "NMI watchdog use.\n"); + connect_bsp_APIC(); + setup_local_APIC(); + end_local_APIC_setup(); + } +#endif + return -1; + } + + return 0; +} + +static void __init smp_cpu_index_default(void) +{ + int i; + struct cpuinfo_x86 *c; + + for_each_cpu_mask(i, cpu_possible_map) { + c = &cpu_data(i); + /* mark all to hotplug */ + c->cpu_index = NR_CPUS; + } +} + +/* + * Prepare for SMP bootup. The MP table or ACPI has been read + * earlier. Just do some sanity checking here and enable APIC mode. + */ +void __init native_smp_prepare_cpus(unsigned int max_cpus) +{ + nmi_watchdog_default(); + smp_cpu_index_default(); + current_cpu_data = boot_cpu_data; + cpu_callin_map = cpumask_of_cpu(0); + mb(); + /* + * Setup boot CPU information + */ + smp_store_cpu_info(0); /* Final full version of the data */ + boot_cpu_logical_apicid = logical_smp_processor_id(); + current_thread_info()->cpu = 0; /* needed? */ + set_cpu_sibling_map(0); + + if (smp_sanity_check(max_cpus) < 0) { + printk(KERN_INFO "SMP disabled\n"); + disable_smp(); + return; + } + + if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) { + panic("Boot APIC ID in local APIC unexpected (%d vs %d)", + GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid); + /* Or can we switch back to PIC here? */ + } + +#ifdef CONFIG_X86_32 + connect_bsp_APIC(); +#endif + /* + * Switch from PIC to APIC mode. + */ + setup_local_APIC(); + +#ifdef CONFIG_X86_64 + /* + * Enable IO APIC before setting up error vector + */ + if (!skip_ioapic_setup && nr_ioapics) + enable_IO_APIC(); +#endif + end_local_APIC_setup(); + + map_cpu_to_logical_apicid(); + + setup_portio_remap(); + + smpboot_setup_io_apic(); + /* + * Set up local APIC timer on boot CPU. + */ + + printk(KERN_INFO "CPU%d: ", 0); + print_cpu_info(&cpu_data(0)); + setup_boot_clock(); +} /* * Early setup to make printk work. 
*/ diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5a0f57f35191..3a1b9e40cedb 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -74,7 +74,6 @@ EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); u8 apicid_2_node[MAX_APICID]; -extern void map_cpu_to_logical_apicid(void); extern void unmap_cpu_to_logical_apicid(int cpu); #ifdef CONFIG_HOTPLUG_CPU @@ -94,144 +93,8 @@ void cpu_exit_clear(void) } #endif -static int boot_cpu_logical_apicid; /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; #ifdef CONFIG_X86_NUMAQ EXPORT_SYMBOL(xquad_portio); #endif - -static void __init disable_smp(void) -{ - cpu_possible_map = cpumask_of_cpu(0); - cpu_present_map = cpumask_of_cpu(0); - smpboot_clear_io_apic_irqs(); - if (smp_found_config) - phys_cpu_present_map = - physid_mask_of_physid(boot_cpu_physical_apicid); - else - phys_cpu_present_map = physid_mask_of_physid(0); - map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); -} - -static int __init smp_sanity_check(unsigned max_cpus) -{ - if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { - printk(KERN_WARNING "weird, boot CPU (#%d) not listed" - "by the BIOS.\n", hard_smp_processor_id()); - physid_set(hard_smp_processor_id(), phys_cpu_present_map); - } - - /* - * If we couldn't find an SMP configuration at boot time, - * get out of here now! - */ - if (!smp_found_config && !acpi_lapic) { - printk(KERN_NOTICE "SMP motherboard not detected.\n"); - disable_smp(); - if (APIC_init_uniprocessor()) - printk(KERN_NOTICE "Local APIC not detected." - " Using dummy APIC emulation.\n"); - return -1; - } - - /* - * Should not be necessary because the MP table should list the boot - * CPU too, but we do it for the sake of robustness anyway. - * Makes no sense to do this check in clustered apic mode, so skip it - */ - if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { - printk("weird, boot CPU (#%d) not listed by the BIOS.\n", - boot_cpu_physical_apicid); - physid_set(hard_smp_processor_id(), phys_cpu_present_map); - } - - /* - * If we couldn't find a local APIC, then get out of here now! - */ - if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { - printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", - boot_cpu_physical_apicid); - printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); - smpboot_clear_io_apic(); - return -1; - } - - verify_local_APIC(); - - /* - * If SMP should be disabled, then really disable it! 
- */ - if (!max_cpus) { - smp_found_config = 0; - printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); - - if (nmi_watchdog == NMI_LOCAL_APIC) { - printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n"); - connect_bsp_APIC(); - setup_local_APIC(); - end_local_APIC_setup(); - } - smpboot_clear_io_apic(); - return -1; - } - return 0; -} - -static void __init smp_cpu_index_default(void) -{ - int i; - struct cpuinfo_x86 *c; - - for_each_cpu_mask(i, cpu_possible_map) { - c = &cpu_data(i); - /* mark all to hotplug */ - c->cpu_index = NR_CPUS; - } -} - -void __init native_smp_prepare_cpus(unsigned int max_cpus) -{ - nmi_watchdog_default(); - smp_cpu_index_default(); - current_cpu_data = boot_cpu_data; - cpu_callin_map = cpumask_of_cpu(0); - mb(); - - /* - * Setup boot CPU information - */ - smp_store_cpu_info(0); /* Final full version of the data */ - boot_cpu_logical_apicid = logical_smp_processor_id(); - current_thread_info()->cpu = 0; - - set_cpu_sibling_map(0); - - if (smp_sanity_check(max_cpus) < 0) { - printk(KERN_INFO "SMP disabled\n"); - disable_smp(); - return; - } - - if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) { - panic("Boot APIC ID in local APIC unexpected (%d vs %d)", - GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid); - /* Or can we switch back to PIC here? */ - } - - connect_bsp_APIC(); - setup_local_APIC(); - end_local_APIC_setup(); - map_cpu_to_logical_apicid(); - - setup_portio_remap(); - - smpboot_setup_io_apic(); - - printk(KERN_INFO "CPU%d: ", 0); - print_cpu_info(&cpu_data(0)); - setup_boot_clock(); -} - diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 775244545ffa..66b55629733b 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -71,144 +71,3 @@ int smp_threads_ready; cycles_t cacheflush_time; unsigned long cache_decay_ticks; - -static int boot_cpu_logical_apicid; -/* - * Fall back to non SMP mode after errors. - * - * RED-PEN audit/test this more. I bet there is more state messed up here. - */ -static __init void disable_smp(void) -{ - cpu_present_map = cpumask_of_cpu(0); - cpu_possible_map = cpumask_of_cpu(0); - if (smp_found_config) - phys_cpu_present_map = - physid_mask_of_physid(boot_cpu_physical_apicid); - else - phys_cpu_present_map = physid_mask_of_physid(0); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); -} - -/* - * Various sanity checks. - */ -static int __init smp_sanity_check(unsigned max_cpus) -{ - if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { - printk("weird, boot CPU (#%d) not listed by the BIOS.\n", - hard_smp_processor_id()); - physid_set(hard_smp_processor_id(), phys_cpu_present_map); - } - - /* - * If we couldn't find an SMP configuration at boot time, - * get out of here now! - */ - if (!smp_found_config && !acpi_lapic) { - printk(KERN_NOTICE "SMP motherboard not detected.\n"); - disable_smp(); - if (APIC_init_uniprocessor()) - printk(KERN_NOTICE "Local APIC not detected." - " Using dummy APIC emulation.\n"); - return -1; - } - - /* - * Should not be necessary because the MP table should list the boot - * CPU too, but we do it for the sake of robustness anyway. - */ - if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { - printk(KERN_NOTICE - "weird, boot CPU (#%d) not listed by the BIOS.\n", - boot_cpu_physical_apicid); - physid_set(hard_smp_processor_id(), phys_cpu_present_map); - } - - /* - * If we couldn't find a local APIC, then get out of here now! 
- */ - if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && - !cpu_has_apic) { - printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", - boot_cpu_physical_apicid); - printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); - smpboot_clear_io_apic(); - return -1; - } - - verify_local_APIC(); - - /* - * If SMP should be disabled, then really disable it! - */ - if (!max_cpus) { - printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); - smpboot_clear_io_apic(); - return -1; - } - - return 0; -} - -static void __init smp_cpu_index_default(void) -{ - int i; - struct cpuinfo_x86 *c; - - for_each_cpu_mask(i, cpu_possible_map) { - c = &cpu_data(i); - /* mark all to hotplug */ - c->cpu_index = NR_CPUS; - } -} - -/* - * Prepare for SMP bootup. The MP table or ACPI has been read - * earlier. Just do some sanity checking here and enable APIC mode. - */ -void __init native_smp_prepare_cpus(unsigned int max_cpus) -{ - nmi_watchdog_default(); - smp_cpu_index_default(); - cpu_callin_map = cpumask_of_cpu(0); - mb(); - - current_cpu_data = boot_cpu_data; - boot_cpu_logical_apicid = logical_smp_processor_id(); - current_thread_info()->cpu = 0; /* needed? */ - set_cpu_sibling_map(0); - - if (smp_sanity_check(max_cpus) < 0) { - printk(KERN_INFO "SMP disabled\n"); - disable_smp(); - return; - } - - if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) { - panic("Boot APIC ID in local APIC unexpected (%d vs %d)", - GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid); - /* Or can we switch back to PIC here? */ - } - - /* - * Switch from PIC to APIC mode. - */ - setup_local_APIC(); - - /* - * Enable IO APIC before setting up error vector - */ - if (!skip_ioapic_setup && nr_ioapics) - enable_IO_APIC(); - end_local_APIC_setup(); - - /* - * Set up local APIC timer on boot CPU. 
- */ - - setup_boot_clock(); - printk(KERN_INFO "CPU%d: ", 0); - print_cpu_info(&cpu_data(0)); -} -- cgit v1.2.1 From 2cd9fb71eedffb3a208a84daff705b9da5c915e8 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:12 -0300 Subject: x86: merge cpu_exit_clear this is the last remaining function in smpboot_32.c Since it is i386 specific, move it around an ifdef to smpboot.c Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 18 ++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 19 ------------------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 45119d39f31e..6a7fb1300073 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1207,6 +1207,24 @@ void __init native_smp_cpus_done(unsigned int max_cpus) } #ifdef CONFIG_HOTPLUG_CPU + +# ifdef CONFIG_X86_32 +void cpu_exit_clear(void) +{ + int cpu = raw_smp_processor_id(); + + idle_task_exit(); + + cpu_uninit(); + irq_ctx_exit(cpu); + + cpu_clear(cpu, cpu_callout_map); + cpu_clear(cpu, cpu_callin_map); + + unmap_cpu_to_logical_apicid(cpu); +} +# endif /* CONFIG_X86_32 */ + void remove_siblinginfo(int cpu) { int sibling; diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 3a1b9e40cedb..5469207fa863 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -74,25 +74,6 @@ EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); u8 apicid_2_node[MAX_APICID]; -extern void unmap_cpu_to_logical_apicid(int cpu); - -#ifdef CONFIG_HOTPLUG_CPU -void cpu_exit_clear(void) -{ - int cpu = raw_smp_processor_id(); - - idle_task_exit(); - - cpu_uninit(); - irq_ctx_exit(cpu); - - cpu_clear(cpu, cpu_callout_map); - cpu_clear(cpu, cpu_callin_map); - - unmap_cpu_to_logical_apicid(cpu); -} -#endif - /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; #ifdef CONFIG_X86_NUMAQ -- cgit v1.2.1 From acbb67341805d3b9ef263d8cbd103a6054164491 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:13 -0300 Subject: x86: move apicid mappings to smpboot.c They are i386 specific (the x86_64 definitions live elsewhere, and should remain there), so are enclosed around an ifdef Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 21 +++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 13 ------------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6a7fb1300073..75637fb760e7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -25,6 +25,27 @@ #include #include +/* + * FIXME: For x86_64, those are defined in other files. But moving them here, + * would make the setup areas dependent on smp, which is a loss. When we + * integrate apic between arches, we can probably do a better job, but + * right now, they'll stay here -- glommer + */ +#ifdef CONFIG_X86_32 +/* which logical CPU number maps to which CPU (physical APIC ID) */ +u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = + { [0 ... NR_CPUS-1] = BAD_APICID }; +void *x86_cpu_to_apicid_early_ptr; +DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); + +u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata + = { [0 ... 
NR_CPUS-1] = BAD_APICID }; +void *x86_bios_cpu_apicid_early_ptr; +DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); +#endif + /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 5469207fa863..3590afe575e7 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -59,19 +59,6 @@ #include #include -/* which logical CPU number maps to which CPU (physical APIC ID) */ -u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = - { [0 ... NR_CPUS-1] = BAD_APICID }; -void *x86_cpu_to_apicid_early_ptr; -DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); - -u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata - = { [0 ... NR_CPUS-1] = BAD_APICID }; -void *x86_bios_cpu_apicid_early_ptr; -DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); - u8 apicid_2_node[MAX_APICID]; /* Where the IO area was mapped on multiquad, always 0 otherwise */ -- cgit v1.2.1 From 4cedb3343f0b087275b9a8e23fc90737881ac91c Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 19 Mar 2008 14:26:14 -0300 Subject: x86: remove smpboot_32.c and smpboot_64.c Remove the last leftovers from the files. Move the ones that are still used to the files they belong, the others that grep can't reach, simply throw away. Merge comments ontop of file and that's it: smpboot integrated Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 4 +-- arch/x86/kernel/smpboot.c | 42 +++++++++++++++++++++++++ arch/x86/kernel/smpboot_32.c | 68 ----------------------------------------- arch/x86/kernel/smpboot_64.c | 73 -------------------------------------------- arch/x86/pci/numa.c | 7 ++++- 5 files changed, 50 insertions(+), 144 deletions(-) delete mode 100644 arch/x86/kernel/smpboot_32.c delete mode 100644 arch/x86/kernel/smpboot_64.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5d33509fd1c1..d5a05a0cef62 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -46,10 +46,10 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_PCI) += early-quirks.o apm-y := apm_32.o obj-$(CONFIG_APM) += apm.o -obj-$(CONFIG_X86_SMP) += smpboot_$(BITS).o smp.o +obj-$(CONFIG_X86_SMP) += smp.o obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o obj-$(CONFIG_X86_32_SMP) += smpcommon.o -obj-$(CONFIG_X86_64_SMP) += smpboot_64.o tsc_sync.o smpcommon.o +obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 75637fb760e7..61b9a5b6fc07 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1,3 +1,44 @@ +/* + * x86 SMP booting functions + * + * (c) 1995 Alan Cox, Building #3 + * (c) 1998, 1999, 2000 Ingo Molnar + * Copyright 2001 Andi Kleen, SuSE Labs. + * + * Much of the core SMP work is based on previous work by Thomas Radke, to + * whom a great many thanks are extended. + * + * Thanks to Intel for making available several different Pentium, + * Pentium Pro and Pentium-II/Xeon MP machines. + * Original development of Linux SMP code supported by Caldera. + * + * This code is released under the GNU General Public License version 2 or + * later. 
+ * + * Fixes + * Felix Koop : NR_CPUS used properly + * Jose Renau : Handle single CPU case. + * Alan Cox : By repeated request 8) - Total BogoMIPS report. + * Greg Wright : Fix for kernel stacks panic. + * Erich Boleyn : MP v1.4 and additional changes. + * Matthias Sattler : Changes for 2.1 kernel map. + * Michel Lespinasse : Changes for 2.1 kernel map. + * Michael Chastain : Change trampoline.S to gnu as. + * Alan Cox : Dumb bug: 'B' step PPro's are fine + * Ingo Molnar : Added APIC timers, based on code + * from Jose Renau + * Ingo Molnar : various cleanups and rewrites + * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. + * Maciej W. Rozycki : Bits for genuine 82489DX APICs + * Andi Kleen : Changed for SMP boot into long mode. + * Martin J. Bligh : Added support for multi-quad systems + * Dave Jones : Report invalid combinations of Athlon CPUs. + * Rusty Russell : Hacked into shape for new "hotplug" boot process. + * Andi Kleen : Converted to new state machine. + * Ashok Raj : CPU hotplug support + * Glauber Costa : i386 and x86_64 integration + */ + #include #include #include @@ -44,6 +85,7 @@ u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata void *x86_bios_cpu_apicid_early_ptr; DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); +u8 apicid_2_node[MAX_APICID]; #endif /* State of each CPU */ diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c deleted file mode 100644 index 3590afe575e7..000000000000 --- a/arch/x86/kernel/smpboot_32.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * x86 SMP booting functions - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar - * - * Much of the core SMP work is based on previous work by Thomas Radke, to - * whom a great many thanks are extended. - * - * Thanks to Intel for making available several different Pentium, - * Pentium Pro and Pentium-II/Xeon MP machines. - * Original development of Linux SMP code supported by Caldera. - * - * This code is released under the GNU General Public License version 2 or - * later. - * - * Fixes - * Felix Koop : NR_CPUS used properly - * Jose Renau : Handle single CPU case. - * Alan Cox : By repeated request 8) - Total BogoMIPS report. - * Greg Wright : Fix for kernel stacks panic. - * Erich Boleyn : MP v1.4 and additional changes. - * Matthias Sattler : Changes for 2.1 kernel map. - * Michel Lespinasse : Changes for 2.1 kernel map. - * Michael Chastain : Change trampoline.S to gnu as. - * Alan Cox : Dumb bug: 'B' step PPro's are fine - * Ingo Molnar : Added APIC timers, based on code - * from Jose Renau - * Ingo Molnar : various cleanups and rewrites - * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. - * Maciej W. Rozycki : Bits for genuine 82489DX APICs - * Martin J. Bligh : Added support for multi-quad systems - * Dave Jones : Report invalid combinations of Athlon CPUs. -* Rusty Russell : Hacked into shape for new "hotplug" boot process. 
*/ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -u8 apicid_2_node[MAX_APICID]; - -/* Where the IO area was mapped on multiquad, always 0 otherwise */ -void *xquad_portio; -#ifdef CONFIG_X86_NUMAQ -EXPORT_SYMBOL(xquad_portio); -#endif diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c deleted file mode 100644 index 66b55629733b..000000000000 --- a/arch/x86/kernel/smpboot_64.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * x86 SMP booting functions - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar - * Copyright 2001 Andi Kleen, SuSE Labs. - * - * Much of the core SMP work is based on previous work by Thomas Radke, to - * whom a great many thanks are extended. - * - * Thanks to Intel for making available several different Pentium, - * Pentium Pro and Pentium-II/Xeon MP machines. - * Original development of Linux SMP code supported by Caldera. - * - * This code is released under the GNU General Public License version 2 - * - * Fixes - * Felix Koop : NR_CPUS used properly - * Jose Renau : Handle single CPU case. - * Alan Cox : By repeated request 8) - Total BogoMIP report. - * Greg Wright : Fix for kernel stacks panic. - * Erich Boleyn : MP v1.4 and additional changes. - * Matthias Sattler : Changes for 2.1 kernel map. - * Michel Lespinasse : Changes for 2.1 kernel map. - * Michael Chastain : Change trampoline.S to gnu as. - * Alan Cox : Dumb bug: 'B' step PPro's are fine - * Ingo Molnar : Added APIC timers, based on code - * from Jose Renau - * Ingo Molnar : various cleanups and rewrites - * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. - * Maciej W. Rozycki : Bits for genuine 82489DX APICs - * Andi Kleen : Changed for SMP boot into long mode. - * Rusty Russell : Hacked into shape for new "hotplug" boot process. - * Andi Kleen : Converted to new state machine. - * Various cleanups. - * Probably mostly hotplug CPU ready now. 
- * Ashok Raj : CPU hotplug support - */ - - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* Set when the idlers are all forked */ -int smp_threads_ready; - -cycles_t cacheflush_time; -unsigned long cache_decay_ticks; diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index 9e30378364ed..79d0a98b9d03 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -20,7 +20,12 @@ int mp_bus_id_to_local[MAX_MP_BUSSES]; int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) -extern void *xquad_portio; /* Where the IO area was mapped */ +/* Where the IO area was mapped on multiquad, always 0 otherwise */ +void *xquad_portio; +#ifdef CONFIG_X86_NUMAQ +EXPORT_SYMBOL(xquad_portio); +#endif + #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ -- cgit v1.2.1 From d2953315c70a4783c94ae6af04f4b0aaad2f09c5 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:07:59 +0300 Subject: x86: lindent mpparse_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 277 ++++++++++++++++++++++--------------------- 1 file changed, 141 insertions(+), 136 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 20a345dd425b..269fd46df42c 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -41,7 +41,7 @@ unsigned int __cpuinitdata maxcpus = NR_CPUS; * MP-table. */ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; +int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; static int mp_current_pci_id = 0; /* I/O APIC entries */ @@ -56,8 +56,6 @@ int mp_irq_entries; int nr_ioapics; unsigned long mp_lapic_addr = 0; - - /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; EXPORT_SYMBOL(boot_cpu_physical_apicid); @@ -71,12 +69,11 @@ unsigned disabled_cpus __cpuinitdata; physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE; u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata - = { [0 ... NR_CPUS-1] = BAD_APICID }; + = {[0 ... NR_CPUS - 1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); - /* * Intel MP BIOS table parsing routines: */ @@ -114,13 +111,13 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) if (num_processors >= NR_CPUS) { printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." - " Processor ignored.\n", NR_CPUS); + " Processor ignored.\n", NR_CPUS); return; } if (num_processors >= maxcpus) { printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." - " Processor ignored.\n", maxcpus); + " Processor ignored.\n", maxcpus); return; } @@ -129,14 +126,14 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) cpu = first_cpu(tmp_map); physid_set(m->mpc_apicid, phys_cpu_present_map); - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - /* + if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + /* * x86_bios_cpu_apicid is required to have processors listed - * in same order as logical cpu numbers. Hence the first - * entry is BSP, and so on. - */ + * in same order as logical cpu numbers. 
Hence the first + * entry is BSP, and so on. + */ cpu = 0; - } + } /* are we being called early in kernel startup? */ if (x86_cpu_to_apicid_early_ptr) { u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; @@ -153,7 +150,7 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) cpu_set(cpu, cpu_present_map); } -static void __init MP_bus_info (struct mpc_config_bus *m) +static void __init MP_bus_info(struct mpc_config_bus *m) { char str[7]; @@ -176,24 +173,24 @@ static int bad_ioapic(unsigned long address) { if (nr_ioapics >= MAX_IO_APICS) { printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " - "(found %d)\n", MAX_IO_APICS, nr_ioapics); + "(found %d)\n", MAX_IO_APICS, nr_ioapics); panic("Recompile kernel with bigger MAX_IO_APICS!\n"); } if (!address) { printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" - " found in table, skipping!\n"); + " found in table, skipping!\n"); return 1; } return 0; } -static void __init MP_ioapic_info (struct mpc_config_ioapic *m) +static void __init MP_ioapic_info(struct mpc_config_ioapic *m) { if (!(m->mpc_flags & MPC_APIC_USABLE)) return; - printk("I/O APIC #%d at 0x%X.\n", - m->mpc_apicid, m->mpc_apicaddr); + printk(KERN_INFO "I/O APIC #%d at 0x%X.\n", m->mpc_apicid, + m->mpc_apicaddr); if (bad_ioapic(m->mpc_apicaddr)) return; @@ -202,25 +199,25 @@ static void __init MP_ioapic_info (struct mpc_config_ioapic *m) nr_ioapics++; } -static void __init MP_intsrc_info (struct mpc_config_intsrc *m) +static void __init MP_intsrc_info(struct mpc_config_intsrc *m) { - mp_irqs [mp_irq_entries] = *m; + mp_irqs[mp_irq_entries] = *m; Dprintk("Int: type %d, pol %d, trig %d, bus %d," " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, + m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); if (++mp_irq_entries >= MAX_IRQ_SOURCES) panic("Max # of irq sources exceeded!!\n"); } -static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) +static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) { Dprintk("Lint: type %d, pol %d, trig %d, bus %d," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, + m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); } /* @@ -229,39 +226,38 @@ static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) { char str[16]; - int count=sizeof(*mpc); - unsigned char *mpt=((unsigned char *)mpc)+count; - - if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { - printk("MPTABLE: bad signature [%c%c%c%c]!\n", - mpc->mpc_signature[0], - mpc->mpc_signature[1], - mpc->mpc_signature[2], - mpc->mpc_signature[3]); + int count = sizeof(*mpc); + unsigned char *mpt = ((unsigned char *)mpc) + count; + + if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { + printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", + mpc->mpc_signature[0], + mpc->mpc_signature[1], + mpc->mpc_signature[2], mpc->mpc_signature[3]); return 0; } - if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { - printk("MPTABLE: checksum error!\n"); + if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { + printk(KERN_ERR "MPTABLE: checksum error!\n"); return 
0; } - if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { + if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", - mpc->mpc_spec); + mpc->mpc_spec); return 0; } if (!mpc->mpc_lapic) { printk(KERN_ERR "MPTABLE: null local APIC address!\n"); return 0; } - memcpy(str,mpc->mpc_oem,8); + memcpy(str, mpc->mpc_oem, 8); str[8] = 0; - printk(KERN_INFO "MPTABLE: OEM ID: %s ",str); + printk(KERN_INFO "MPTABLE: OEM ID: %s ", str); - memcpy(str,mpc->mpc_productid,12); + memcpy(str, mpc->mpc_productid, 12); str[12] = 0; - printk("MPTABLE: Product ID: %s ",str); + printk(KERN_INFO "MPTABLE: Product ID: %s ", str); - printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic); + printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); /* save the local APIC address, it might be non-default */ if (!acpi_lapic) @@ -271,52 +267,52 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) return 1; /* - * Now process the configuration blocks. + * Now process the configuration blocks. */ while (count < mpc->mpc_length) { - switch(*mpt) { - case MP_PROCESSOR: + switch (*mpt) { + case MP_PROCESSOR: { - struct mpc_config_processor *m= - (struct mpc_config_processor *)mpt; + struct mpc_config_processor *m = + (struct mpc_config_processor *)mpt; if (!acpi_lapic) MP_processor_info(m); mpt += sizeof(*m); count += sizeof(*m); break; } - case MP_BUS: + case MP_BUS: { - struct mpc_config_bus *m= - (struct mpc_config_bus *)mpt; + struct mpc_config_bus *m = + (struct mpc_config_bus *)mpt; MP_bus_info(m); mpt += sizeof(*m); count += sizeof(*m); break; } - case MP_IOAPIC: + case MP_IOAPIC: { - struct mpc_config_ioapic *m= - (struct mpc_config_ioapic *)mpt; + struct mpc_config_ioapic *m = + (struct mpc_config_ioapic *)mpt; MP_ioapic_info(m); mpt += sizeof(*m); count += sizeof(*m); break; } - case MP_INTSRC: + case MP_INTSRC: { - struct mpc_config_intsrc *m= - (struct mpc_config_intsrc *)mpt; + struct mpc_config_intsrc *m = + (struct mpc_config_intsrc *)mpt; MP_intsrc_info(m); mpt += sizeof(*m); count += sizeof(*m); break; } - case MP_LINTSRC: + case MP_LINTSRC: { - struct mpc_config_lintsrc *m= - (struct mpc_config_lintsrc *)mpt; + struct mpc_config_lintsrc *m = + (struct mpc_config_lintsrc *)mpt; MP_lintsrc_info(m); mpt += sizeof(*m); count += sizeof(*m); @@ -345,7 +341,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) int ELCR_fallback = 0; intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ + intsrc.mpc_irqflag = 0; /* conforming */ intsrc.mpc_srcbus = 0; intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; @@ -360,12 +356,16 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) * If it does, we assume it's valid. */ if (mpc_default_type == 5) { - printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); + printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " + "falling back to ELCR\n"); - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) - printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n"); + if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || + ELCR_trigger(13)) + printk(KERN_ERR "ELCR contains invalid data... 
" + "not using ELCR\n"); else { - printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); + printk(KERN_INFO + "Using ELCR to identify PCI interrupts\n"); ELCR_fallback = 1; } } @@ -394,13 +394,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) } intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ + intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ MP_intsrc_info(&intsrc); } intsrc.mpc_irqtype = mp_ExtINT; intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ + intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ MP_intsrc_info(&intsrc); } @@ -436,14 +436,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) bus.mpc_type = MP_BUS; bus.mpc_busid = 0; switch (mpc_default_type) { - default: - printk(KERN_ERR "???\nUnknown standard configuration %d\n", - mpc_default_type); - /* fall through */ - case 1: - case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); - break; + default: + printk(KERN_ERR "???\nUnknown standard configuration %d\n", + mpc_default_type); + /* fall through */ + case 1: + case 5: + memcpy(bus.mpc_bustype, "ISA ", 6); + break; } MP_bus_info(&bus); if (mpc_default_type > 4) { @@ -465,7 +465,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) construct_default_ioirq_mptable(mpc_default_type); lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ + lintsrc.mpc_irqflag = 0; /* conforming */ lintsrc.mpc_srcbusid = 0; lintsrc.mpc_srcbusirq = 0; lintsrc.mpc_destapic = MP_APIC_ALL; @@ -493,14 +493,14 @@ static void __init __get_smp_config(unsigned early) */ if (acpi_lapic && acpi_ioapic) { printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " - "information\n"); + "information\n"); return; } else if (acpi_lapic) printk(KERN_INFO "Using ACPI for processor (LAPIC) " - "configuration information\n"); + "configuration information\n"); printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", - mpf->mpf_specification); + mpf->mpf_specification); /* * Now see if we need to read further. @@ -514,7 +514,8 @@ static void __init __get_smp_config(unsigned early) return; } - printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); + printk(KERN_INFO "Default MP configuration #%d\n", + mpf->mpf_feature1); construct_default_ISA_mptable(mpf->mpf_feature1); } else if (mpf->mpf_physptr) { @@ -525,8 +526,10 @@ static void __init __get_smp_config(unsigned early) */ if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { smp_found_config = 0; - printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); + printk(KERN_ERR + "BIOS bug, MP table errors detected!...\n"); + printk(KERN_ERR "... disabling SMP support. " + "(tell your hw vendor)\n"); return; } @@ -540,7 +543,9 @@ static void __init __get_smp_config(unsigned early) if (!mp_irq_entries) { struct mpc_config_bus bus; - printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); + printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " + "using default mptable. 
" + "(tell your hw vendor)\n"); bus.mpc_type = MP_BUS; bus.mpc_busid = 0; @@ -573,21 +578,21 @@ void __init get_smp_config(void) static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned reserve) { - extern void __bad_mpf_size(void); + extern void __bad_mpf_size(void); unsigned int *bp = phys_to_virt(base); struct intel_mp_floating *mpf; - Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); + Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); if (sizeof(*mpf) != 16) __bad_mpf_size(); while (length > 0) { mpf = (struct intel_mp_floating *)bp; if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && - !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4)) ) { + (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, 16) && + ((mpf->mpf_specification == 1) + || (mpf->mpf_specification == 4))) { smp_found_config = 1; mpf_found = mpf; @@ -620,8 +625,8 @@ static void __init __find_smp_config(unsigned reserve) * 3) Scan the 64K of bios */ if (smp_scan_config(0x0, 0x400, reserve) || - smp_scan_config(639*0x400, 0x400, reserve) || - smp_scan_config(0xF0000, 0x10000, reserve)) + smp_scan_config(639 * 0x400, 0x400, reserve) || + smp_scan_config(0xF0000, 0x10000, reserve)) return; /* * If it is an SMP machine we should know now. @@ -642,7 +647,7 @@ static void __init __find_smp_config(unsigned reserve) return; /* If we have come this far, we did not find an MP table */ - printk(KERN_INFO "No mptable found.\n"); + printk(KERN_INFO "No mptable found.\n"); } void __init early_find_smp_config(void) @@ -663,17 +668,17 @@ void __init find_smp_config(void) void __init mp_register_lapic_address(u64 address) { - mp_lapic_addr = (unsigned long) address; + mp_lapic_addr = (unsigned long)address; set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); if (boot_cpu_physical_apicid == -1U) boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); } -void __cpuinit mp_register_lapic (u8 id, u8 enabled) +void __cpuinit mp_register_lapic(u8 id, u8 enabled) { struct mpc_config_processor processor; - int boot_cpu = 0; - + int boot_cpu = 0; + if (id == boot_cpu_physical_apicid) boot_cpu = 1; @@ -694,10 +699,10 @@ void __cpuinit mp_register_lapic (u8 id, u8 enabled) #define MP_MAX_IOAPIC_PIN 127 static struct mp_ioapic_routing { - int apic_id; - int gsi_start; - int gsi_end; - u32 pin_programmed[4]; + int apic_id; + int gsi_start; + int gsi_end; + u32 pin_programmed[4]; } mp_ioapic_routing[MAX_IO_APICS]; static int mp_find_ioapic(int gsi) @@ -707,7 +712,7 @@ static int mp_find_ioapic(int gsi) /* Find the IOAPIC that manages this GSI. */ for (i = 0; i < nr_ioapics; i++) { if ((gsi >= mp_ioapic_routing[i].gsi_start) - && (gsi <= mp_ioapic_routing[i].gsi_end)) + && (gsi <= mp_ioapic_routing[i].gsi_end)) return i; } @@ -745,31 +750,30 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); mp_ioapics[idx].mpc_apicver = 0; - + /* * Build basic IRQ lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI IRQs). 
*/ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; mp_ioapic_routing[idx].gsi_start = gsi_base; - mp_ioapic_routing[idx].gsi_end = gsi_base + - io_apic_get_redir_entries(idx); + mp_ioapic_routing[idx].gsi_end = gsi_base + + io_apic_get_redir_entries(idx); printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, " - "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_start, - mp_ioapic_routing[idx].gsi_end); + "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, + mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].gsi_start, + mp_ioapic_routing[idx].gsi_end); nr_ioapics++; } -void __init -mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) +void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { struct mpc_config_intsrc intsrc; - int ioapic = -1; - int pin = -1; + int ioapic = -1; + int pin = -1; /* * Convert 'gsi' to 'ioapic.pin'. @@ -791,13 +795,13 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) intsrc.mpc_irqtype = mp_INT; intsrc.mpc_irqflag = (trigger << 2) | polarity; intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ - intsrc.mpc_dstirq = pin; /* INTIN# */ + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", - intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); mp_irqs[mp_irq_entries] = intsrc; @@ -824,7 +828,7 @@ void __init mp_config_acpi_legacy_irqs(void) return; intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_irqflag = 0; /* Conforming */ intsrc.mpc_srcbus = MP_ISA_BUS; intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; @@ -839,28 +843,29 @@ void __init mp_config_acpi_legacy_irqs(void) struct mpc_config_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? */ - if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) + if (irq->mpc_srcbus == MP_ISA_BUS + && irq->mpc_srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && - (irq->mpc_dstirq == i)) + (irq->mpc_dstirq == i)) break; } if (idx != mp_irq_entries) { printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); - continue; /* IRQ already used */ + continue; /* IRQ already used */ } intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_srcbusirq = i; /* Identity mapped */ + intsrc.mpc_srcbusirq = i; /* Identity mapped */ intsrc.mpc_dstirq = i; Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " - "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, + "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); mp_irqs[mp_irq_entries] = intsrc; @@ -899,21 +904,21 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) idx = (ioapic_pin < 32) ? 
0 : (ioapic_pin / 32); if (idx > 3) { printk(KERN_ERR "Invalid reference to IOAPIC pin " - "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, - ioapic_pin); + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, + ioapic_pin); return gsi; } - if ((1< Date: Mon, 17 Mar 2008 22:08:05 +0300 Subject: x86: add bad_ioapic to mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 000b51b78fbd..fd89d3521bd3 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -287,6 +287,21 @@ static void __init MP_bus_info (struct mpc_config_bus *m) } } +static int bad_ioapic(unsigned long address) +{ + if (nr_ioapics >= MAX_IO_APICS) { + printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " + "(found %d)\n", MAX_IO_APICS, nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS!\n"); + } + if (!address) { + printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" + " found in table, skipping!\n"); + return 1; + } + return 0; +} + static void __init MP_ioapic_info (struct mpc_config_ioapic *m) { if (!(m->mpc_flags & MPC_APIC_USABLE)) @@ -294,16 +309,10 @@ static void __init MP_ioapic_info (struct mpc_config_ioapic *m) printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", - MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); - } - if (!m->mpc_apicaddr) { - printk(KERN_ERR "WARNING: bogus zero I/O APIC address" - " found in MP table, skipping!\n"); + + if (bad_ioapic(m->mpc_apicaddr)) return; - } + mp_ioapics[nr_ioapics] = *m; nr_ioapics++; } @@ -918,16 +927,8 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) int idx = 0; int tmpid; - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " - "(found %d)\n", MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS!\n"); - } - if (!address) { - printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" - " found in MADT table, skipping!\n"); + if (bad_ioapic(address)) return; - } idx = nr_ioapics++; -- cgit v1.2.1 From e3e3ffa20351b32b5eaa6020d051305c8d803ed4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:11 +0300 Subject: x86: add uniq_ioapic_id to mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index fd89d3521bd3..838e4974e1ce 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -922,31 +922,30 @@ static int mp_find_ioapic (int gsi) return -1; } +static u8 uniq_ioapic_id(u8 id) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else + return id; +} + void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) { int idx = 0; - int tmpid; if (bad_ioapic(address)) return; - idx = nr_ioapics++; + idx = nr_ioapics; mp_ioapics[idx].mpc_type = MP_IOAPIC; mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; mp_ioapics[idx].mpc_apicaddr = address; set_fixmap_nocache(FIX_IO_APIC_BASE_0 + 
idx, address); - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - tmpid = io_apic_get_unique_id(idx, id); - else - tmpid = id; - if (tmpid == -1) { - nr_ioapics--; - return; - } - mp_ioapics[idx].mpc_apicid = tmpid; + mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); /* @@ -960,9 +959,12 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapics[idx].mpc_apicver, + mp_ioapics[idx].mpc_apicaddr, mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); + + nr_ioapics++; } void __init -- cgit v1.2.1 From ce3fe6b2bfded4f5d931c5f2f9325dc2e3fd3a74 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:17 +0300 Subject: x86: use get_bios_ebda in mpparse_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 2 +- arch/x86/kernel/mpparse_64.c | 11 ++++------- arch/x86/kernel/setup_32.c | 2 +- arch/x86/mm/discontig_32.c | 2 +- include/asm-x86/bios_ebda.h | 15 +++++++++++++++ include/asm-x86/mach-default/bios_ebda.h | 15 --------------- 6 files changed, 22 insertions(+), 25 deletions(-) create mode 100644 include/asm-x86/bios_ebda.h delete mode 100644 include/asm-x86/mach-default/bios_ebda.h diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 838e4974e1ce..a2162644cb4e 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -27,11 +27,11 @@ #include #include #include +#include #include #include #include -#include /* Have we found an MP table */ int smp_found_config; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 269fd46df42c..fb74135f9d0e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -641,13 +642,9 @@ static void __init __find_smp_config(unsigned reserve) * should be fixed. */ - address = *(unsigned short *)phys_to_virt(0x40E); - address <<= 4; - if (smp_scan_config(address, 0x1000, reserve)) - return; - - /* If we have come this far, we did not find an MP table */ - printk(KERN_INFO "No mptable found.\n"); + address = get_bios_ebda(); + if (address) + smp_scan_config(address, 0x1000, reserve); } void __init early_find_smp_config(void) diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index eb97bcfe0f6f..58f3c1fbc5c3 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -62,7 +62,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 8e25e06ff730..eba0bbede7a6 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h new file mode 100644 index 000000000000..9cbd9a668af8 --- /dev/null +++ b/include/asm-x86/bios_ebda.h @@ -0,0 +1,15 @@ +#ifndef _MACH_BIOS_EBDA_H +#define _MACH_BIOS_EBDA_H + +/* + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. 
+ */ +static inline unsigned int get_bios_ebda(void) +{ + unsigned int address = *(unsigned short *)phys_to_virt(0x40E); + address <<= 4; + return address; /* 0 means none */ +} + +#endif /* _MACH_BIOS_EBDA_H */ diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/mach-default/bios_ebda.h deleted file mode 100644 index 9cbd9a668af8..000000000000 --- a/include/asm-x86/mach-default/bios_ebda.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _MACH_BIOS_EBDA_H -#define _MACH_BIOS_EBDA_H - -/* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E. - */ -static inline unsigned int get_bios_ebda(void) -{ - unsigned int address = *(unsigned short *)phys_to_virt(0x40E); - address <<= 4; - return address; /* 0 means none */ -} - -#endif /* _MACH_BIOS_EBDA_H */ -- cgit v1.2.1 From 85e46035bec6f114ad07ce8a9c70388568b1afd4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:24 +0300 Subject: x86: limit scan to 1k of EBDA. Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index fb74135f9d0e..19c826485516 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -640,11 +640,13 @@ static void __init __find_smp_config(unsigned reserve) * trustworthy, simply because the SMP table may have been * stomped on during early boot. These loaders are buggy and * should be fixed. + * + * MP1.4 SPEC states to only scan first 1K of 4K EBDA. */ address = get_bios_ebda(); if (address) - smp_scan_config(address, 0x1000, reserve); + smp_scan_config(address, 0x400, reserve); } void __init early_find_smp_config(void) -- cgit v1.2.1 From 555b07646d5bd0bcd4825e83580d5f6bb34259ea Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:30 +0300 Subject: x86: rename gsi_start to gsi_base to match mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 19c826485516..83a36eed081b 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -699,7 +699,7 @@ void __cpuinit mp_register_lapic(u8 id, u8 enabled) static struct mp_ioapic_routing { int apic_id; - int gsi_start; + int gsi_base; int gsi_end; u32 pin_programmed[4]; } mp_ioapic_routing[MAX_IO_APICS]; @@ -710,7 +710,7 @@ static int mp_find_ioapic(int gsi) /* Find the IOAPIC that manages this GSI. */ for (i = 0; i < nr_ioapics; i++) { - if ((gsi >= mp_ioapic_routing[i].gsi_start) + if ((gsi >= mp_ioapic_routing[i].gsi_base) && (gsi <= mp_ioapic_routing[i].gsi_end)) return i; } @@ -755,14 +755,14 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) * and to prevent reprogramming of IOAPIC pins (PCI IRQs). 
*/ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; - mp_ioapic_routing[idx].gsi_start = gsi_base; + mp_ioapic_routing[idx].gsi_base = gsi_base; mp_ioapic_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx); printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_start, + mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); nr_ioapics++; @@ -780,7 +780,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) ioapic = mp_find_ioapic(gsi); if (ioapic < 0) return; - pin = gsi - mp_ioapic_routing[ioapic].gsi_start; + pin = gsi - mp_ioapic_routing[ioapic].gsi_base; /* * TBD: This check is for faulty timer entries, where the override @@ -892,7 +892,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) return gsi; } - ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start; + ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; /* * Avoid pin reprogramming. PRTs typically include entries -- cgit v1.2.1 From 4655c7deca112bea86ca00f616f19c3717f687aa Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:36 +0300 Subject: x86: remove mpc_apic_id() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 9 +++++++++ include/asm-x86/genapic_32.h | 3 --- include/asm-x86/mach-bigsmp/mach_apic.h | 11 ----------- include/asm-x86/mach-default/mach_apic.h | 11 ----------- include/asm-x86/mach-es7000/mach_apic.h | 10 ---------- include/asm-x86/mach-generic/mach_apic.h | 1 - include/asm-x86/mach-summit/mach_apic.h | 11 ----------- 7 files changed, 9 insertions(+), 47 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index a2162644cb4e..072fcc462399 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -118,7 +118,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) return; } +#ifdef CONFIG_X86_NUMAQ apicid = mpc_apic_id(m, translation_table[mpc_record]); +#else + Dprintk("Processor #%d %u:%u APIC version %d\n", + m->mpc_apicid, + (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, + m->mpc_apicver); + apicid = m->mpc_apicid; +#endif if (m->mpc_featureflag&(1<<0)) Dprintk(" Floating point unit present.\n"); diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 33e3ffe1766c..5d024400ddde 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -42,8 +42,6 @@ struct genapic { int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - int (*mpc_apic_id)(struct mpc_config_processor *m, - struct mpc_config_translation *t); void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); @@ -105,7 +103,6 @@ struct genapic { APICFUNC(cpu_to_logical_apicid) \ APICFUNC(cpu_present_to_apicid) \ APICFUNC(apicid_to_cpu_present) \ - APICFUNC(mpc_apic_id) \ APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ APICFUNC(mpc_oem_bus_info) \ diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 0d55b1f6d56b..8327907c79bf 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h @@ -106,17 +106,6 @@ static inline int cpu_to_logical_apicid(int cpu) return 
cpu_physical_id(cpu); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e081bdccde2b..13900e8cc1ab 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -100,17 +100,6 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) return physid_mask_of_physid(phys_apicid); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline void setup_portio_remap(void) { } diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index 04cba9f1e375..0137b6e142cc 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -127,16 +127,6 @@ static inline int cpu_to_logical_apicid(int cpu) #endif } -static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); -} - static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h index a236e7021528..6eff343e1233 100644 --- a/include/asm-x86/mach-generic/mach_apic.h +++ b/include/asm-x86/mach-generic/mach_apic.h @@ -19,7 +19,6 @@ #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) -#define mpc_apic_id (genapic->mpc_apic_id) #define setup_portio_remap (genapic->setup_portio_remap) #define check_apicid_present (genapic->check_apicid_present) #define check_phys_apicid_present (genapic->check_phys_apicid_present) diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 91d7641cddc9..1f76c2e70232 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h @@ -125,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) return physid_mask_of_physid(0); } -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - static inline void setup_portio_remap(void) { } -- cgit v1.2.1 From d285e338899a4ff662a17b22d3bb0e48bb1465d4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:42 +0300 Subject: x86: remove 
mpc_oem_pci_bus() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 2 ++ arch/x86/pci/numa.c | 8 ++++++++ include/asm-x86/genapic_32.h | 3 --- include/asm-x86/mach-default/mach_mpparse.h | 5 ----- include/asm-x86/mach-es7000/mach_mpparse.h | 5 ----- include/asm-x86/mach-generic/mach_mpparse.h | 1 - include/asm-x86/mach-numaq/mach_mpparse.h | 12 ++---------- include/asm-x86/mach-summit/mach_mpparse.h | 5 ----- 8 files changed, 12 insertions(+), 29 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 072fcc462399..621eac569550 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -278,7 +278,9 @@ static void __init MP_bus_info (struct mpc_config_bus *m) set_bit(m->mpc_busid, mp_bus_not_pci); if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { +#ifdef CONFIG_X86_NUMAQ mpc_oem_pci_bus(m, translation_table[mpc_record]); +#endif clear_bit(m->mpc_busid, mp_bus_not_pci); mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_current_pci_id++; diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index 79d0a98b9d03..a98ae0e57272 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -19,6 +19,14 @@ int mp_bus_id_to_local[MAX_MP_BUSSES]; int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) +void mpc_oem_pci_bus(struct mpc_config_bus *m, + struct mpc_config_translation *translation) +{ + int quad = translation->trans_quad; + int local = translation->trans_local; + + quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; +} /* Where the IO area was mapped on multiquad, always 0 otherwise */ void *xquad_portio; diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5d024400ddde..5a1ae0164aa0 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -50,8 +50,6 @@ struct genapic { /* mpparse */ void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, struct mpc_config_translation *); - void (*mpc_oem_pci_bus)(struct mpc_config_bus *, - struct mpc_config_translation *); /* When one of the next two hooks returns 1 the genapic is switched to this. 
Essentially they are additional probe @@ -106,7 +104,6 @@ struct genapic { APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ APICFUNC(mpc_oem_bus_info) \ - APICFUNC(mpc_oem_pci_bus) \ APICFUNC(mps_oem_check) \ APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 1d3832482580..679393c26dc6 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h @@ -7,11 +7,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 52ee75cd0fe1..3afa1b010818 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h @@ -9,11 +9,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - extern int parse_unisys_oem (char *oemptr); extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index dbd9fce54f4d..2a6937150334 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h @@ -4,7 +4,6 @@ #include #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) -#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); int acpi_madt_oem_check(char *oem_id, char *oem_table_id); diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 254993ae04c7..4a5d8a8b01a2 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -15,16 +15,8 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); } -extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; -} +extern void mpc_oem_pci_bus(struct mpc_config_bus *m, + struct mpc_config_translation *translation); /* Hook from generic ACPI tables.c */ static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index c2520539d934..e1af489a8809 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h @@ -18,11 +18,6 @@ static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { -- cgit v1.2.1 From 
0ec153af4dec8944e6da558093914a3bce4c76f9 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:48 +0300 Subject: x86: remove mpc_oem_bus_info() Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++++ arch/x86/pci/numa.c | 12 ++++++++++++ include/asm-x86/genapic_32.h | 4 ---- include/asm-x86/mach-default/mach_mpparse.h | 6 ------ include/asm-x86/mach-es7000/mach_mpparse.h | 6 ------ include/asm-x86/mach-generic/mach_mpparse.h | 4 ---- include/asm-x86/mach-numaq/mach_mpparse.h | 16 ++-------------- include/asm-x86/mach-summit/mach_mpparse.h | 6 ------ 8 files changed, 18 insertions(+), 40 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 621eac569550..febd69dbbee9 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -265,7 +265,11 @@ static void __init MP_bus_info (struct mpc_config_bus *m) memcpy(str, m->mpc_bustype, 6); str[6] = 0; +#ifdef CONFIG_X86_NUMAQ mpc_oem_bus_info(m, str, translation_table[mpc_record]); +#else + Dprintk("Bus #%d is %s\n", m->mpc_busid, str); +#endif #if MAX_MP_BUSSES < 256 if (m->mpc_busid >= MAX_MP_BUSSES) { diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index a98ae0e57272..d9afbae5092b 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c @@ -17,6 +17,18 @@ int mp_bus_id_to_node[MAX_MP_BUSSES]; int mp_bus_id_to_local[MAX_MP_BUSSES]; #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) +void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, + struct mpc_config_translation *translation) +{ + int quad = translation->trans_quad; + int local = translation->trans_local; + + mp_bus_id_to_node[m->mpc_busid] = quad; + mp_bus_id_to_local[m->mpc_busid] = local; + printk(KERN_INFO "Bus #%d is %s (node %d)\n", + m->mpc_busid, name, quad); +} + int quad_local_to_mp_bus_id [NR_CPUS/4][4]; #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) void mpc_oem_pci_bus(struct mpc_config_bus *m, diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5a1ae0164aa0..5a1b68ac3ca9 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -48,9 +48,6 @@ struct genapic { u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ - void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, - struct mpc_config_translation *); - /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. 
*/ @@ -103,7 +100,6 @@ struct genapic { APICFUNC(apicid_to_cpu_present) \ APICFUNC(setup_portio_remap) \ APICFUNC(check_phys_apicid_present) \ - APICFUNC(mpc_oem_bus_info) \ APICFUNC(mps_oem_check) \ APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 679393c26dc6..d14108505bb8 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h @@ -1,12 +1,6 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ -// Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 3afa1b010818..ef26d3523625 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h @@ -3,12 +3,6 @@ #include -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - extern int parse_unisys_oem (char *oemptr); extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index 2a6937150334..0d0b5ba2e9d1 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h @@ -1,10 +1,6 @@ #ifndef _MACH_MPPARSE_H #define _MACH_MPPARSE_H 1 -#include - -#define mpc_oem_bus_info (genapic->mpc_oem_bus_info) - int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); int acpi_madt_oem_check(char *oem_id, char *oem_table_id); diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 4a5d8a8b01a2..459b12401187 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h @@ -1,20 +1,8 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H -extern int mp_bus_id_to_local[MAX_MP_BUSSES]; -extern int mp_bus_id_to_node[MAX_MP_BUSSES]; - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - mp_bus_id_to_node[m->mpc_busid] = quad; - mp_bus_id_to_local[m->mpc_busid] = local; - printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); -} - +extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, + struct mpc_config_translation *translation); extern void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation); diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index e1af489a8809..fdf591701339 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h @@ -12,12 +12,6 @@ extern void setup_summit(void); #define setup_summit() {} #endif -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { -- cgit v1.2.1 From 
864205062f1c752c80077be8ec2b15c81f4a6525 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Mon, 17 Mar 2008 22:08:55 +0300 Subject: x86: make struct mpc_config_translation NUMAQ-only Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 6 ++++++ include/asm-x86/genapic_32.h | 1 - include/asm-x86/mach-numaq/mach_apic.h | 10 ++++++++++ include/asm-x86/mpspec_def.h | 11 ----------- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index febd69dbbee9..15dd87110298 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -98,6 +98,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } +#ifdef CONFIG_X86_NUMAQ /* * Have to match translation table entries to main table entries by counter * hence the mpc_record variable .... can't see a less disgusting way of @@ -106,6 +107,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) static int mpc_record; static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata; +#endif static void __cpuinit MP_processor_info (struct mpc_config_processor *m) { @@ -475,7 +477,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) /* * Now process the configuration blocks. */ +#ifdef CONFIG_X86_NUMAQ mpc_record = 0; +#endif while (count < mpc->mpc_length) { switch(*mpt) { case MP_PROCESSOR: @@ -532,7 +536,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) break; } } +#ifdef CONFIG_X86_NUMAQ ++mpc_record; +#endif } setup_apic_routing(); if (!num_processors) diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5a1b68ac3ca9..b501ae7809ba 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -14,7 +14,6 @@ * Copyright 2003 Andi Kleen, SuSE Labs. */ -struct mpc_config_translation; struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 3b637fac890b..75a56e5afbe7 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h @@ -95,6 +95,16 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) return physid_mask_of_physid(cpu + 4*node); } +struct mpc_config_translation { + unsigned char mpc_type; + unsigned char trans_len; + unsigned char trans_type; + unsigned char trans_quad; + unsigned char trans_global; + unsigned char trans_local; + unsigned short trans_reserved; +}; + static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 3504617fe648..1f35691b4f7c 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h @@ -166,17 +166,6 @@ struct mp_config_oemtable char mpc_oem[8]; }; -struct mpc_config_translation -{ - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - /* * Default configurations * -- cgit v1.2.1 From 3250c91ada16a06de5afef55bce7b766c894d75c Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Thu, 20 Mar 2008 00:39:02 -0700 Subject: x86: vSMP: Fix is_vsmp_box() is_vsmp_box() currently does not work on vSMPowered systems, as pci cfg space is not read correctly -- This patch fixes it. 
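The breakage is a one-character operator slip: the original check used || (logical OR) where | (bitwise OR) was intended, so the right-hand side of the comparison collapsed to the constant 1 and the test could never match a real vendor/device dword. A minimal user-space sketch of the difference, with made-up ID values standing in for the real PCI constants:

    #include <stdio.h>

    #define VENDOR_ID 0x8686u    /* illustrative stand-ins, not */
    #define DEVICE_ID 0x1010u    /* necessarily the real IDs    */

    int main(void)
    {
            /* the buggy form: logical OR of two nonzero constants is 1 */
            unsigned int wrong = VENDOR_ID || (DEVICE_ID << 16);

            /* the intended form: device ID in the high word, vendor low */
            unsigned int right = VENDOR_ID | (DEVICE_ID << 16);

            printf("|| gives 0x%08x, | gives 0x%08x\n", wrong, right);
            return 0;
    }

The patch below sidesteps the combined compare entirely, reading the vendor and device IDs with two 16-bit config-space reads and comparing each on its own.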
Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/vsmp_64.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index a00961d42e75..eb25584c54c3 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -84,8 +84,10 @@ int is_vsmp_box(void) return vsmp; /* Check if we are running on a ScaleMP vSMP box */ - if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) == - (PCI_VENDOR_ID_SCALEMP || (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16))) + if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) == + PCI_VENDOR_ID_SCALEMP) && + (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) == + PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) vsmp = 1; return vsmp; -- cgit v1.2.1 From aa7d8e25eca5deb33eb08013bc78a80514349b40 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Thu, 20 Mar 2008 00:41:16 -0700 Subject: x86: fix build breakage when PCI is define and PARAVIRT is not - Fix the the build breakage when PARAVIRT is defined but PCI is not This fixes problem reported at: http://marc.info/?l=linux-kernel&m=120525966600698&w=2 - Make is_vsmp_box() available even when PARAVIRT is not defined. This is needed to determine if tsc's are reliable as a time source even when PARAVIRT is not defined. - split vsmp_init to use is_vsmp_box() and set_vsmp_pv_ops() set_vsmp_pv_ops will do nothing if PCI is not enabled in the config. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/setup_64.c | 2 -- arch/x86/kernel/vsmp_64.c | 72 +++++++++++++++++++++++++++------------------- include/asm-x86/apic.h | 7 ++--- 4 files changed, 46 insertions(+), 37 deletions(-) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d5a05a0cef62..1fe841a86f7e 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -60,7 +60,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o -obj-$(CONFIG_PARAVIRT) += vsmp_64.o +obj-y += vsmp_64.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_MODULES) += module_$(BITS).o obj-$(CONFIG_ACPI_SRAT) += srat_32.o diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 990724143c43..20034493b5ad 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -351,9 +351,7 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled) efi_init(); -#ifdef CONFIG_PARAVIRT vsmp_init(); -#endif dmi_scan_machine(); diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index eb25584c54c3..4a790a5f61b7 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -19,6 +19,7 @@ #include #include +#if defined CONFIG_PCI && defined CONFIG_PARAVIRT /* * Interrupt control on vSMPowered systems: * ~AC is a shadow of IF. 
If IF is 'on' AC should be 'off' @@ -72,39 +73,11 @@ static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, } -static int vsmp = -1; - -int is_vsmp_box(void) -{ - if (vsmp != -1) - return vsmp; - - vsmp = 0; - if (!early_pci_allowed()) - return vsmp; - - /* Check if we are running on a ScaleMP vSMP box */ - if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) == - PCI_VENDOR_ID_SCALEMP) && - (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) == - PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) - vsmp = 1; - - return vsmp; -} - -void __init vsmp_init(void) +static void __init set_vsmp_pv_ops(void) { void *address; unsigned int cap, ctl, cfg; - if (!is_vsmp_box()) - return; - - if (!early_pci_allowed()) - return; - - /* If we are, use the distinguished irq functions */ pv_irq_ops.irq_disable = vsmp_irq_disable; pv_irq_ops.irq_enable = vsmp_irq_enable; pv_irq_ops.save_fl = vsmp_save_fl; @@ -127,5 +100,46 @@ void __init vsmp_init(void) } early_iounmap(address, 8); +} +#else +static void __init set_vsmp_pv_ops(void) +{ +} +#endif + +#ifdef CONFIG_PCI +static int vsmp = -1; + +int is_vsmp_box(void) +{ + if (vsmp != -1) + return vsmp; + + vsmp = 0; + if (!early_pci_allowed()) + return vsmp; + + /* Check if we are running on a ScaleMP vSMP box */ + if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) == + PCI_VENDOR_ID_SCALEMP) && + (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) == + PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) + vsmp = 1; + + return vsmp; +} +#else +int is_vsmp_box(void) +{ + return 0; +} +#endif + +void __init vsmp_init(void) +{ + if (!is_vsmp_box()) + return; + + set_vsmp_pv_ops(); return; } diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index b804238489a1..b11d6524fb13 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -50,19 +50,16 @@ extern int disable_apic_timer; */ #ifdef CONFIG_PARAVIRT #include -extern int is_vsmp_box(void); #else #define apic_write native_apic_write #define apic_write_atomic native_apic_write_atomic #define apic_read native_apic_read #define setup_boot_clock setup_boot_APIC_clock #define setup_secondary_clock setup_secondary_APIC_clock -static int inline is_vsmp_box(void) -{ - return 0; -} #endif +extern int is_vsmp_box(void); + static inline void native_apic_write(unsigned long reg, u32 v) { *((volatile u32 *)(APIC_BASE + reg)) = v; -- cgit v1.2.1 From 9f6d8552a9cb49dc556777bbdf7ac8b3d7e18edb Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Thu, 20 Mar 2008 00:43:16 -0700 Subject: x86: vSMP: use pvops only if platform has the capability to support it Re-arrange set_vsmp_pv_ops so that pv_ops are set only if the platform has capability to support paravirtualized irq ops Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/vsmp_64.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 4a790a5f61b7..13bd82453e4b 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -78,12 +78,6 @@ static void __init set_vsmp_pv_ops(void) void *address; unsigned int cap, ctl, cfg; - pv_irq_ops.irq_disable = vsmp_irq_disable; - pv_irq_ops.irq_enable = vsmp_irq_enable; - pv_irq_ops.save_fl = vsmp_save_fl; - pv_irq_ops.restore_fl = vsmp_restore_fl; - pv_init_ops.patch = vsmp_patch; - /* set vSMP magic bits to indicate vSMP capable kernel */ cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0); address = early_ioremap(cfg, 8); @@ -92,7 +86,13 @@ static void __init set_vsmp_pv_ops(void) printk(KERN_INFO "vSMP CTL: 
capabilities:0x%08x control:0x%08x\n", cap, ctl); if (cap & ctl & (1 << 4)) { - /* Turn on vSMP IRQ fastpath handling (see system.h) */ + /* Setup irq ops and turn on vSMP IRQ fastpath handling */ + pv_irq_ops.irq_disable = vsmp_irq_disable; + pv_irq_ops.irq_enable = vsmp_irq_enable; + pv_irq_ops.save_fl = vsmp_save_fl; + pv_irq_ops.restore_fl = vsmp_restore_fl; + pv_init_ops.patch = vsmp_patch; + ctl &= ~(1 << 4); writel(ctl, address + 4); ctl = readl(address + 4); -- cgit v1.2.1 From 1cb68487f5898dd97460e5b6bda9619ec3549361 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Thu, 20 Mar 2008 00:45:08 -0700 Subject: x86: apic_is_clustered_box to indicate unsynched TSC's on multiboard vSMP systems Indicate TSCs are unreliable as time sources if the platform is a multi chassi ScaleMP vSMPowered machine. Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 868ec1deb19a..5362cfd30ecd 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -1209,7 +1209,7 @@ __cpuinit int apic_is_clustered_box(void) * will be [4, 0x23] or [8, 0x27] could be thought to * vsmp box still need checking... */ - if (!is_vsmp_box() && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) return 0; bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; @@ -1249,6 +1249,12 @@ __cpuinit int apic_is_clustered_box(void) ++zeros; } + /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are + * not guaranteed to be synced between boards + */ + if (is_vsmp_box() && clusters > 1) + return 1; + /* * If clusters > 2, then should be multi-chassis. * May have to revisit this when multi-core + hyperthreaded CPUs come -- cgit v1.2.1 From 6542fe80e6296cde50c1c3b8a9eede701ee51907 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 21 Mar 2008 09:55:06 +0100 Subject: x86: vsmp fix x86 vsmp fix is vsmp box cleanup code got a bit smaller: arch/x86/kernel/vsmp_64.o: text data bss dec hex filename 205 4 0 209 d1 vsmp_64.o.before 181 4 0 185 b9 vsmp_64.o.after Signed-off-by: Ingo Molnar --- arch/x86/kernel/vsmp_64.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 13bd82453e4b..1e9a791dbe39 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -120,10 +120,8 @@ int is_vsmp_box(void) return vsmp; /* Check if we are running on a ScaleMP vSMP box */ - if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) == - PCI_VENDOR_ID_SCALEMP) && - (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) == - PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) + if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) == + (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16))) vsmp = 1; return vsmp; -- cgit v1.2.1 From ede1389f8ab4f3a1343e567133fa9720a054a3aa Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 17 Mar 2008 22:29:32 +0200 Subject: x86: remove the write-only timer_uses_ioapic_pin_0 This patch removes the write-only timer_uses_ioapic_pin_0 (gsi can't be <= 15 in the line of it's fake usage in mpparse_32.c). Spotted by the GNU C compiler. 
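A write-only variable like this is a dead store: every assignment to it can be removed without changing behaviour, because no read ever observes it. Newer GCC diagnoses the pattern directly (the -Wunused-but-set-variable warning, added in GCC 4.6, postdates this patch); a trivial reproduction, unrelated to the kernel source:

    /* build with: gcc -Wunused-but-set-variable -c dead.c */
    int classify(int gsi)
    {
            int uses_pin_0 = 0;     /* only ever written, never read */

            if (gsi == 0)
                    uses_pin_0 = 1;

            return gsi > 15;        /* result never consults uses_pin_0 */
    }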
Signed-off-by: Adrian Bunk Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 5 ----- arch/x86/kernel/mpparse_32.c | 3 +-- include/asm-x86/io_apic.h | 1 - 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 0d70acd3b134..742fab45e1c6 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -2114,8 +2114,6 @@ static inline void unlock_ExtINT_logic(void) ioapic_write_entry(apic, pin, entry0); } -int timer_uses_ioapic_pin_0; - /* * This code may look a bit paranoid, but it's supposed to cooperate with * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ @@ -2155,9 +2153,6 @@ static inline void __init check_timer(void) pin2 = ioapic_i8259.pin; apic2 = ioapic_i8259.apic; - if (pin1 == 0) - timer_uses_ioapic_pin_0 = 1; - printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", vector, apic1, pin1, apic2, pin2); diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 15dd87110298..15265ee11f89 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -1176,8 +1176,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) * So test for this condition, and if necessary, avoid * the pin collision. */ - if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0)) - gsi = pci_irq++; + gsi = pci_irq++; /* * Don't assign IRQ used by ACPI SCI */ diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 0f5b3fef0b08..095e8e30a345 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h @@ -146,7 +146,6 @@ extern int io_apic_get_version(int ioapic); extern int io_apic_get_redir_entries(int ioapic); extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low); -extern int timer_uses_ioapic_pin_0; #endif /* CONFIG_ACPI */ extern int (*ioapic_renumber_irq)(int ioapic, int irq); -- cgit v1.2.1 From 8ab32bb89b5b9bf06147c31947eba65f0f21c3c0 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Sun, 16 Mar 2008 21:57:41 -0700 Subject: x86: ia32 ptrace vs -ENOSYS When we're stopped at syscall entry tracing, ptrace can change the %eax value from -ENOSYS to something else. If no system call is actually made because the syscall number (now in orig_eax) is bad, then the %eax value set by ptrace should be returned to the user. But, instead it gets reset to -ENOSYS again. This is a regression from the native 32-bit kernel. This change fixes it by leaving the return value alone after entry tracing. Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 8022d3c695c0..30f492934f23 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -325,7 +325,7 @@ ENTRY(ia32_syscall) jnz ia32_tracesys ia32_do_syscall: cmpl $(IA32_NR_syscalls-1),%eax - ja ia32_badsys + ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ IA32_ARG_FIXUP call *ia32_sys_call_table(,%rax,8) # xxx: rip relative ia32_sysret: @@ -335,7 +335,7 @@ ia32_sysret: ia32_tracesys: SAVE_REST CLEAR_RREGS - movq $-ENOSYS,RAX(%rsp) /* really needed? 
*/ + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ -- cgit v1.2.1 From a31f8dd7ee3b2f5645c220406b1e21f82971f32b Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Sun, 16 Mar 2008 21:59:11 -0700 Subject: x86: ptrace vs -ENOSYS When we're stopped at syscall entry tracing, ptrace can change the %rax value from -ENOSYS to something else. If no system call is actually made because the syscall number (now in orig_rax) is bad, then we now always reset %rax to -ENOSYS again. This changes it to leave the return value alone after entry tracing. That way, the %rax value set by ptrace is there to be seen in user mode (or in syscall exit tracing). This is consistent with what the 32-bit kernel does. Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_64.S | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c20c9e7e08dd..556a8df522a7 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -319,19 +319,17 @@ badsys: /* Do syscall tracing */ tracesys: SAVE_REST - movq $-ENOSYS,RAX(%rsp) + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ FIXUP_TOP_OF_STACK %rdi movq %rsp,%rdi call syscall_trace_enter LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST cmpq $__NR_syscall_max,%rax - movq $-ENOSYS,%rcx - cmova %rcx,%rax - ja 1f + ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ movq %r10,%rcx /* fixup for C */ call *sys_call_table(,%rax,8) -1: movq %rax,RAX-ARGOFFSET(%rsp) + movq %rax,RAX-ARGOFFSET(%rsp) /* Use IRET because user could have changed frame */ /* -- cgit v1.2.1 From 48ee679a02406c65ced67c3951ad19744eb21083 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 18 Mar 2008 18:23:50 -0700 Subject: x86: ia32 ptrace vs -ENOSYS sysenter/syscall The previous "x86_64 ia32 ptrace vs -ENOSYS" fix only covered the int $0x80 system call entries. This does the same fix for the sysenter and syscall instruction paths. Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32entry.S | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 30f492934f23..ae7158bce4d6 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -162,12 +162,14 @@ sysenter_tracesys: SAVE_REST CLEAR_RREGS movq %r9,R9(%rsp) - movq $-ENOSYS,RAX(%rsp) /* really needed? */ + movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST xchgl %ebp,%r9d + cmpl $(IA32_NR_syscalls-1),%eax + ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ jmp sysenter_do_call CFI_ENDPROC ENDPROC(ia32_sysenter_target) @@ -261,13 +263,15 @@ cstar_tracesys: SAVE_REST CLEAR_RREGS movq %r9,R9(%rsp) - movq $-ENOSYS,RAX(%rsp) /* really needed? 
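With the handler invoked after resume_execution(), a post_handler can at last read the resume address straight from the saved registers. A minimal sketch of a module that depends on the new ordering; the probed symbol is a hypothetical example and error handling is trimmed:

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp = {
            .symbol_name = "do_fork",       /* hypothetical probe target */
    };

    /*
     * Runs after the probed instruction has been single-stepped. With the
     * reordering above, regs->ip (regs->eip on older 32-bit kernels) now
     * holds the address of the next instruction to execute.
     */
    static void resume_post_handler(struct kprobe *p, struct pt_regs *regs,
                                    unsigned long flags)
    {
            printk(KERN_INFO "probe at %p resumes at %lx\n",
                   p->addr, regs->ip);
    }

    static int __init kp_init(void)
    {
            kp.post_handler = resume_post_handler;
            return register_kprobe(&kp);
    }

    static void __exit kp_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kp_init);
    module_exit(kp_exit);
    MODULE_LICENSE("GPL");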
*/ + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST xchgl %ebp,%r9d movl RSP-ARGOFFSET(%rsp), %r8d + cmpl $(IA32_NR_syscalls-1),%eax + ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ jmp cstar_do_call END(ia32_cstar_target) -- cgit v1.2.1 From b00de174e3d52d9185e2e9937a6a2704c9d7d520 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Sun, 16 Mar 2008 22:00:05 -0700 Subject: x86: sys32_execve PT_DTRACE The PT_DTRACE flag is meaningless and obsolete. Don't touch it. Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/ia32/sys_ia32.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 44d931f08414..7cede7a9e0dc 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -780,11 +780,6 @@ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, if (IS_ERR(filename)) return error; error = compat_do_execve(filename, argv, envp, regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); return error; } -- cgit v1.2.1 From 0f54091051c450bab751c3ca0cb45d61a67a683b Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Mon, 17 Mar 2008 02:21:08 -0700 Subject: x86: handle_vm86_trap cleanup Use force_sig in handle_vm86_trap like other machine traps do. Signed-off-by: Roland McGrath Signed-off-by: Ingo Molnar --- arch/x86/kernel/vm86_32.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 6a91fcf92d67..51040698c222 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -557,16 +557,9 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) } if (trapno != 1) return 1; /* we let this handle by the calling routine */ - if (current->ptrace & PT_PTRACED) { - unsigned long flags; - spin_lock_irqsave(&current->sighand->siglock, flags); - sigdelset(&current->blocked, SIGTRAP); - recalc_sigpending(); - spin_unlock_irqrestore(&current->sighand->siglock, flags); - } - send_sig(SIGTRAP, current, 1); current->thread.trap_no = trapno; current->thread.error_code = error_code; + force_sig(SIGTRAP, current); return 0; } -- cgit v1.2.1 From acb5b8a2dd0f901463d075382ea548935e679f4e Mon Sep 17 00:00:00 2001 From: Yakov Lerner Date: Sun, 16 Mar 2008 03:21:21 -0500 Subject: x86, kprobes: correct post-eip value in post_handler() I was trying to get the address of the instruction to be executed next after the kprobed instruction. But regs->eip in post_handler() contains a value which is useless to the user: the pre-corrected value. This value is difficult to use without access to resume_execution(), which is not exported anyway. I moved the invocation of post_handler() to *after* resume_execution(). Now regs->eip contains a meaningful value in post_handler(). I do not think this change breaks any backward-compatibility. To make sense of the old value, post_handler() would need access to resume_execution(), which is not exported. I find it hard to believe that the previous, uncorrected, regs->eip could be meaningfully used in post_handler().
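For illustration, a minimal sketch of a module that consumes the corrected value; the probed symbol and message are hypothetical, and the unified regs->ip field name is assumed for this tree:

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp = {
        .symbol_name = "do_fork",   /* hypothetical probe target */
    };

    /* Runs after resume_execution(): regs->ip now points at the
     * instruction that will actually execute next. */
    static void next_ip_post_handler(struct kprobe *p, struct pt_regs *regs,
                                     unsigned long flags)
    {
        printk(KERN_INFO "kprobe at %p, next ip %lx\n", p->addr, regs->ip);
    }

    static int __init probe_init(void)
    {
        kp.post_handler = next_ip_post_handler;
        return register_kprobe(&kp);
    }

    static void __exit probe_exit(void)
    {
        unregister_kprobe(&kp);
    }

    module_init(probe_init);
    module_exit(probe_exit);
    MODULE_LICENSE("GPL");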
Signed-off-by: Yakov Lerner Acked-by: Ananth N Mavinakayanahalli Acked-by: Masami Hiramatsu Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 1e3de7db9ad5..cc8ae90103ff 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -858,15 +858,15 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) if (!cur) return 0; + resume_execution(cur, regs, kcb); + regs->flags |= kcb->kprobe_saved_flags; + trace_hardirqs_fixup_flags(regs->flags); + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } - resume_execution(cur, regs, kcb); - regs->flags |= kcb->kprobe_saved_flags; - trace_hardirqs_fixup_flags(regs->flags); - /* Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); -- cgit v1.2.1 From f7d909d5475bb27d261389a3902860e086b0d4c9 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 14 Mar 2008 07:56:32 +0000 Subject: x86: simplify sync_test_bit() There really is no need for a redundant implementation here; just keep the alternative name so that consumers can use consistent naming. Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index 6b775c905666..bc249f40e0ee 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -123,26 +123,7 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) return oldbit; } -static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr) -{ - return ((1UL << (nr & 31)) & - (((const volatile unsigned int *)addr)[nr >> 5])) != 0; -} - -static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (ADDR),"Ir" (nr)); - return oldbit; -} - -#define sync_test_bit(nr,addr) \ - (__builtin_constant_p(nr) ? \ - sync_constant_test_bit((nr),(addr)) : \ - sync_var_test_bit((nr),(addr))) +#define sync_test_bit test_bit #undef ADDR -- cgit v1.2.1 From 0054f4b708d21bd0032480cf7309d17753bc17bb Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 13 Mar 2008 21:47:32 -0400 Subject: x86: Explicitly include required header files. After an experimental header cleanup, these files were exposed as invoking kmalloc() without including <linux/slab.h>. Signed-off-by: Robert P. J. Day Signed-off-by: Ingo Molnar --- arch/x86/kernel/nmi_32.c | 1 + arch/x86/kernel/test_nx.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 9cfc094eddb0..662e63e1cd57 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c index 10b8a6f69f84..787a5e499dd1 100644 --- a/arch/x86/kernel/test_nx.c +++ b/arch/x86/kernel/test_nx.c @@ -11,6 +11,8 @@ */ #include #include +#include + #include #include -- cgit v1.2.1 From a5ae1c372dc5bbaee905bcede524d7180d22b362 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 13 Mar 2008 19:44:56 +0300 Subject: x86: processor.h - use PAGE_SIZE instead of numeric value This patch replaces a numeric constant with an appropriate macro. Also, 0x800000000000UL is changed to a bit shift, which matches the code comment (thanks to hpa for noticing). Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 40227c9bf51b..b0dece41dbc4 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -870,7 +870,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE64 (0x800000000000UL - 4096) +#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. -- cgit v1.2.1 From 272b9cad6e7a2f61b13cfcd7dde0010e02e9376e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 20 Mar 2008 23:58:33 -0700 Subject: x86: early memtest to find bad ram Do a simple memtest after init_memory_mapping: use find_e820_area_size to find all RAM ranges that are not reserved, and do some simple bit-pattern tests to find bad RAM. If bad RAM is found, use reserve_early to exclude that range. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820_64.c | 70 +++++++++++++++++++++++++++++- arch/x86/mm/init_64.c | 106 +++++++++++++++++++++++++++++++++++++++++++++- include/asm-x86/e820_64.h | 3 ++ 3 files changed, 177 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 4a0953857cb2..4509757844eb 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -114,6 +114,40 @@ again: return changed; } +/* Check for already reserved areas */ +static inline int +bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align) +{ + int i; + unsigned long addr = *addrp, last; + unsigned long size = *sizep; + int changed = 0; +again: + last = addr + size; + for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { + struct early_res *r = &early_res[i]; + if (last > r->start && addr < r->start) { + size = r->start - addr; + changed = 1; + goto again; + } + if (last > r->end && addr < r->end) { + addr = round_up(r->end, align); + size = last - addr; + changed = 1; + goto again; + } + if (last <= r->end && addr >= r->start) { + (*sizep)++; + return 0; + } + } + if (changed) { + *addrp = addr; + *sizep = size; + } + return changed; +} /* * This function checks if any part of the range is mapped * with type. */
@@ -190,7 +224,7 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, ei_last = ei->addr + ei->size; if (addr < start) addr = round_up(start, align); - if (addr > ei_last) + if (addr >= ei_last) continue; while (bad_addr(&addr, size, align) && addr+size <= ei_last) ; @@ -204,6 +238,40 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, return -1UL; } +/* + * Find next free range after *start + */ +unsigned long __init find_e820_area_size(unsigned long start, unsigned long *sizep, unsigned long align) +{ + int i; + + for (i = 0; i < e820.nr_map; i++) { + struct e820entry *ei = &e820.map[i]; + unsigned long addr, last; + unsigned long ei_last; + + if (ei->type != E820_RAM) + continue; + addr = round_up(ei->addr, align); + ei_last = ei->addr + ei->size; +// printk(KERN_DEBUG "find_e820_area_size : e820 %d [%llx, %lx]\n", i, ei->addr, ei_last); + if (addr < start) + addr = round_up(start, align); +// printk(KERN_DEBUG "find_e820_area_size : 0 [%lx, %lx]\n", addr, ei_last); + if (addr >= ei_last) + continue; + *sizep = ei_last - addr; + while (bad_addr_size(&addr, sizep, align) && addr+ *sizep <= ei_last) + ; + last = addr + *sizep; +// printk(KERN_DEBUG "find_e820_area_size : 1 [%lx, %lx]\n", addr, last); + if (last > ei_last) + continue; + return addr; + } + return -1UL; + +} /* * Find the highest page frame number we have available */ diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 255e51feb157..52f54ee4559f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -427,6 +427,106 @@ static void __init init_gbpages(void) direct_gbpages = 0; } +static void __init memtest(unsigned long start_phys, unsigned long size, unsigned pattern) +{ + unsigned long i; + unsigned long *start; + unsigned long start_bad; + unsigned long last_bad; + unsigned long val; + unsigned long start_phys_aligned; + unsigned long count; + unsigned long incr; + + switch (pattern) { + case 0: + val = 0UL; + break; + case 1: + val = -1UL; + break; + case 2: + val = 0x5555555555555555UL; + break; + case 3: + val = 0xaaaaaaaaaaaaaaaaUL; + break; + default: + return; + } + + incr = sizeof(unsigned long); + start_phys_aligned = ALIGN(start_phys, incr); + count = (size - (start_phys_aligned - start_phys))/incr; + start = __va(start_phys_aligned); + start_bad = 0; + last_bad = 0; + + for (i = 0; i < count; i++) + start[i] = val; + for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { + if (*start != val) { + if (start_phys_aligned == last_bad + incr) { + last_bad += incr; + } else { + if (start_bad) { + printk(KERN_INFO " %016lxx bad mem addr %016lx - %016lx reserved\n", + val, start_bad, last_bad + incr); + reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); + } + start_bad = last_bad = start_phys_aligned; + } + } + } + if (start_bad) { + printk(KERN_INFO " %016lx bad mem addr %016lx - %016lx reserved\n", + val, start_bad, last_bad + incr); + reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); + } + +} + +static int __initdata memtest_pattern; +static int __init parse_memtest(char *arg) +{ + if (arg) + memtest_pattern = simple_strtoul(arg, NULL, 0) + 1; + return 0; +} + +early_param("memtest", parse_memtest); + +static void __init early_memtest(unsigned long start, unsigned long end) +{ + unsigned long t_start, t_size; + unsigned pattern; + + if (memtest_pattern) + printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); + for (pattern = 0; pattern < memtest_pattern; pattern++) { + t_start = start; + t_size = 0; + 
+ while (t_start < end) { + t_start = find_e820_area_size(t_start, &t_size, 1); + + /* done ? */ + if (t_start >= end) + break; + if (t_start + t_size > end) + t_size = end - t_start; + + printk(KERN_CONT "\n %016lx - %016lx pattern %d", + t_start, t_start + t_size, pattern); + + memtest(t_start, t_size, pattern); + + t_start += t_size; + } + } + if (memtest_pattern) + printk(KERN_CONT "\n"); +} + /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from @@ -435,8 +535,9 @@ static void __init init_gbpages(void) void __init_refok init_memory_mapping(unsigned long start, unsigned long end) { unsigned long next; + unsigned long start_phys = start, end_phys = end; - pr_debug("init_memory_mapping\n"); + printk(KERN_INFO "init_memory_mapping\n"); /* * Find space for the kernel direct mapping tables. @@ -479,6 +580,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) if (!after_bootmem) reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT, "PGTABLE"); + + if (!after_bootmem) + early_memtest(start_phys, end_phys); } #ifndef CONFIG_NUMA diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index ef653a403e0b..d38820b31c1c 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -16,6 +16,9 @@ #ifndef __ASSEMBLY__ extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned long size, unsigned long align); +extern unsigned long find_e820_area_size(unsigned long start, + unsigned long *sizep, + unsigned long align); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, -- cgit v1.2.1 From 01561264bd1ea1d654d09babe02d784a5b150124 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 20 Mar 2008 23:57:21 -0700 Subject: x86: allocate e820 resource struct all together No need to allocate them one by one. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820_64.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 4509757844eb..9184e6437c4f 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -300,9 +300,10 @@ unsigned long __init e820_end_of_ram(void) void __init e820_reserve_resources(void) { int i; + struct resource *res; + + res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); for (i = 0; i < e820.nr_map; i++) { - struct resource *res; - res = alloc_bootmem_low(sizeof(struct resource)); switch (e820.map[i].type) { case E820_RAM: res->name = "System RAM"; break; case E820_ACPI: res->name = "ACPI Tables"; break; @@ -313,6 +314,7 @@ void __init e820_reserve_resources(void) res->end = res->start + e820.map[i].size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; insert_resource(&iomem_resource, res); + res++; } } -- cgit v1.2.1 From 7de6a4cdac6341807261a33896f9ab5a502a4e74 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Thu, 13 Mar 2008 11:03:58 +0100 Subject: x86: clean up aperture_64.c Initializing to zero is generally a bad idea; I hope it is right for __init data, too.
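The rule behind this kind of cleanup, sketched with hypothetical variables:

    /* Implicitly zero: may live in .bss (or .init.data with __initdata)
     * and costs nothing extra in the on-disk image; the loader zeroes
     * it anyway. */
    static int nr_widgets __initdata;

    /* The explicit zero is redundant and, with older toolchains, could
     * pin the variable into the initialized-data section instead. */
    static int nr_gadgets __initdata = 0;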
Signed-off-by: Pavel Machek Signed-off-by: Ingo Molnar --- arch/x86/kernel/aperture_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 00df126169b4..479926d9e004 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -27,11 +27,11 @@ #include int gart_iommu_aperture; -int gart_iommu_aperture_disabled __initdata = 0; -int gart_iommu_aperture_allowed __initdata = 0; +int gart_iommu_aperture_disabled __initdata; +int gart_iommu_aperture_allowed __initdata; int fallback_aper_order __initdata = 1; /* 64MB */ -int fallback_aper_force __initdata = 0; +int fallback_aper_force __initdata; int fix_aperture __initdata = 1; -- cgit v1.2.1 From d27554d874c7eeb14c8bfecdc39c3a8618cd8d32 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:13 -0700 Subject: x86: PAT documentation Documentation about PAT related interfaces, intended usage and memory attribute relationships. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- Documentation/x86/pat.txt | 100 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 Documentation/x86/pat.txt diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt new file mode 100644 index 000000000000..17965f927c15 --- /dev/null +++ b/Documentation/x86/pat.txt @@ -0,0 +1,100 @@
+
+PAT (Page Attribute Table)
+
+x86 Page Attribute Table (PAT) allows for setting the memory attribute at the
+page level granularity. PAT is complementary to the MTRR settings which allows
+for setting of memory types over physical address ranges. However, PAT is
+more flexible than MTRR due to its capability to set attributes at page level
+and also due to the fact that there are no hardware limitations on number of
+such attribute settings allowed. Added flexibility comes with guidelines for
+not having memory type aliasing for the same physical memory with multiple
+virtual addresses.
+
+PAT allows for different types of memory attributes. The most commonly used
+ones that will be supported at this time are Write-back, Uncached,
+Write-combined and Uncached Minus.
+
+There are many different APIs in the kernel that allows setting of memory
+attributes at the page level. In order to avoid aliasing, these interfaces
+should be used thoughtfully. Below is a table of interfaces available,
+their intended usage and their memory attribute relationships. Internally,
+these APIs use a reserve_memtype()/free_memtype() interface on the physical
+address range to avoid any aliasing.
+
+
+-------------------------------------------------------------------
+API                    |    RAM   |  ACPI,...  |  Reserved/Holes  |
+-----------------------|----------|------------|------------------|
+                       |          |            |                  |
+ioremap                |    --    |     UC     |        UC        |
+                       |          |            |                  |
+ioremap_cache          |    --    |     WB     |        WB        |
+                       |          |            |                  |
+ioremap_nocache        |    --    |     UC     |        UC        |
+                       |          |            |                  |
+ioremap_wc             |    --    |     --     |        WC        |
+                       |          |            |                  |
+set_memory_uc          |    UC    |     --     |        --        |
+ set_memory_wb         |          |            |                  |
+                       |          |            |                  |
+set_memory_wc          |    WC    |     --     |        --        |
+ set_memory_wb         |          |            |                  |
+                       |          |            |                  |
+pci sysfs resource     |    --    |     --     |        UC        |
+                       |          |            |                  |
+pci sysfs resource_wc  |    --    |     --     |        WC        |
+ is IORESOURCE_PREFETCH|          |            |                  |
+                       |          |            |                  |
+pci proc               |    --    |     --     |        UC        |
+ !PCIIOC_WRITE_COMBINE |          |            |                  |
+                       |          |            |                  |
+pci proc               |    --    |     --     |        WC        |
+ PCIIOC_WRITE_COMBINE  |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |     UC     |        UC        |
+ read-write            |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |     UC     |        UC        |
+ mmap SYNC flag        |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |  WB/WC/UC  |     WB/WC/UC     |
+ mmap !SYNC flag       |          |(from exist-|   (from exist-   |
+ and                   |          | ing alias) |    ing alias)    |
+ any alias to this area|          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |     WB     |        WB        |
+ mmap !SYNC flag       |          |            |                  |
+ no alias to this area |          |            |                  |
+ and                   |          |            |                  |
+ MTRR says WB          |          |            |                  |
+                       |          |            |                  |
+/dev/mem               |    --    |     --     |     UC_MINUS     |
+ mmap !SYNC flag       |          |            |                  |
+ no alias to this area |          |            |                  |
+ and                   |          |            |                  |
+ MTRR says !WB         |          |            |                  |
+                       |          |            |                  |
+-------------------------------------------------------------------
+
+Notes:
+
+-- in the above table mean "Not suggested usage for the API". Some of the --'s
+are strictly enforced by the kernel. Some others are not really enforced
+today, but may be enforced in future.
+
+For ioremap and pci access through /sys or /proc - The actual type returned
+can be more restrictive, in case of any existing aliasing for that address.
+For example: If there is an existing uncached mapping, a new ioremap_wc can
+return uncached mapping in place of write-combine requested.
+
+set_memory_[uc|wc] and set_memory_wb should be used in pairs, where driver will
+first make a region uc or wc and switch it back to wb after use.
+
+Over time writes to /proc/mtrr will be deprecated in favor of using PAT based
+interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.
+
+Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
+types.
+
+Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
+
-- cgit v1.2.1 From 2e5d9c857d4e6c9e7b7d8c8c86a68a7842d213d6 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:14 -0700 Subject: x86: PAT infrastructure patch Sets up pat_init() infrastructure. The PAT MSR has the following setting:
PAT
|PCD
||PWT
|||
000 WB _PAGE_CACHE_WB
001 WC _PAGE_CACHE_WC
010 UC- _PAGE_CACHE_UC_MINUS
011 UC _PAGE_CACHE_UC
We are effectively changing WT from the boot time setting to WC. UC_MINUS is used to provide backward compatibility to existing /dev/mem users (X). reserve_memtype and free_memtype are new interfaces for maintaining alias-free mappings. It is currently implemented in a simple way with a linked list and not optimized. reserve and free track the effective memory type, as a result of the PAT and MTRR settings, rather than what is actually requested in PAT. pat_init piggybacks on mtrr_init as the rules for setting both pat and mtrr are the same.
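A sketch of the intended calling pattern for the new interface (the helper name and the MMIO range are hypothetical; the signatures and cache-type macros are the ones introduced below):

    #include <asm/pat.h>
    #include <asm/pgtable.h>

    static int claim_wc_range(u64 start, u64 size)
    {
        unsigned long ret_type;

        /* Ask for write-combining; PAT may hand back a stricter type
         * if the range already has an incompatible alias. */
        if (reserve_memtype(start, start + size, _PAGE_CACHE_WC, &ret_type))
            return -EBUSY;

        if (ret_type != _PAGE_CACHE_WC) {
            /* An existing alias won; give the reservation back. */
            free_memtype(start, start + size);
            return -EBUSY;
        }

        /* ... map and use the range; free_memtype() on teardown ... */
        return 0;
    }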
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 15 ++ arch/x86/kernel/cpu/mtrr/generic.c | 120 +++++++++++ arch/x86/mm/Makefile | 3 +- arch/x86/mm/pageattr.c | 4 +- arch/x86/mm/pat.c | 402 +++++++++++++++++++++++++++++++++++++ include/asm-x86/cpufeature.h | 1 + include/asm-x86/msr-index.h | 2 + include/asm-x86/mtrr.h | 2 + include/asm-x86/pat.h | 16 ++ include/asm-x86/pgtable.h | 6 + 10 files changed, 568 insertions(+), 3 deletions(-) create mode 100644 arch/x86/mm/pat.c create mode 100644 include/asm-x86/pat.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fd27048087b8..5b46756e4c7c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1009,6 +1009,21 @@ config MTRR See for more information. +config X86_PAT + def_bool y + prompt "x86 PAT support" + depends on MTRR && NONPROMISC_DEVMEM + help + Use PAT attributes to setup page level cache control. + ---help--- + PATs are the modern equivalents of MTRRs and are much more + flexible than MTRRs. + + Say N here if you see bootup problems (boot crash, boot hang, + spontaneous reboots) or a non-working Xorg. + + If unsure, say Y. + config EFI def_bool n prompt "EFI runtime service support" diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 3e18db4cefee..011e07e99cd1 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "mtrr.h" struct mtrr_state { @@ -35,6 +36,7 @@ static struct fixed_range_block fixed_range_blocks[] = { static unsigned long smp_changes_mask; static struct mtrr_state mtrr_state = {}; +static int mtrr_state_set; #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "mtrr." @@ -42,6 +44,106 @@ static struct mtrr_state mtrr_state = {}; static int mtrr_show; module_param_named(show, mtrr_show, bool, 0); +/* + * Returns the effective MTRR type for the region + * Error returns: + * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR + * - 0xFF - when MTRR is not enabled + */ +u8 mtrr_type_lookup(u64 start, u64 end) +{ + int i; + u64 base, mask; + u8 prev_match, curr_match; + + if (!mtrr_state_set) + return 0xFF; + + if (!mtrr_state.enabled) + return 0xFF; + + /* Make end inclusive end, instead of exclusive */ + end--; + + /* Look in fixed ranges. 
Just return the type as per start */ + if (mtrr_state.have_fixed && (start < 0x100000)) { + int idx; + + if (start < 0x80000) { + idx = 0; + idx += (start >> 16); + return mtrr_state.fixed_ranges[idx]; + } else if (start < 0xC0000) { + idx = 1 * 8; + idx += ((start - 0x80000) >> 14); + return mtrr_state.fixed_ranges[idx]; + } else if (start < 0x1000000) { + idx = 3 * 8; + idx += ((start - 0xC0000) >> 12); + return mtrr_state.fixed_ranges[idx]; + } + } + + /* + * Look in variable ranges + * Look of multiple ranges matching this address and pick type + * as per MTRR precedence + */ + if (!mtrr_state.enabled & 2) { + return mtrr_state.def_type; + } + + prev_match = 0xFF; + for (i = 0; i < num_var_ranges; ++i) { + unsigned short start_state, end_state; + + if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11))) + continue; + + base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) + + (mtrr_state.var_ranges[i].base_lo & PAGE_MASK); + mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) + + (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK); + + start_state = ((start & mask) == (base & mask)); + end_state = ((end & mask) == (base & mask)); + if (start_state != end_state) + return 0xFE; + + if ((start & mask) != (base & mask)) { + continue; + } + + curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; + if (prev_match == 0xFF) { + prev_match = curr_match; + continue; + } + + if (prev_match == MTRR_TYPE_UNCACHABLE || + curr_match == MTRR_TYPE_UNCACHABLE) { + return MTRR_TYPE_UNCACHABLE; + } + + if ((prev_match == MTRR_TYPE_WRBACK && + curr_match == MTRR_TYPE_WRTHROUGH) || + (prev_match == MTRR_TYPE_WRTHROUGH && + curr_match == MTRR_TYPE_WRBACK)) { + prev_match = MTRR_TYPE_WRTHROUGH; + curr_match = MTRR_TYPE_WRTHROUGH; + } + + if (prev_match != curr_match) { + return MTRR_TYPE_UNCACHABLE; + } + } + + if (prev_match != 0xFF) + return prev_match; + + return mtrr_state.def_type; +} + /* Get the MSR pair relating to a var range */ static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) @@ -79,12 +181,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) base, base + step - 1, mtrr_attrib_to_str(*types)); } +static void prepare_set(void); +static void post_set(void); + /* Grab all of the MTRR state for this CPU into *state */ void __init get_mtrr_state(void) { unsigned int i; struct mtrr_var_range *vrs; unsigned lo, dummy; + unsigned long flags; vrs = mtrr_state.var_ranges; @@ -131,6 +237,17 @@ void __init get_mtrr_state(void) printk(KERN_INFO "MTRR %u disabled\n", i); } } + mtrr_state_set = 1; + + /* PAT setup for BP. We need to go through sync steps here */ + local_irq_save(flags); + prepare_set(); + + pat_init(); + + post_set(); + local_irq_restore(flags); + } /* Some BIOS's are fucked and don't set all MTRRs the same! 
*/ @@ -397,6 +514,9 @@ static void generic_set_all(void) /* Actually set the state */ mask = set_mtrr_state(); + /* also set PAT */ + pat_init(); + post_set(); local_irq_restore(flags); diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 9ab9889863f0..20941d2954e2 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,4 +1,5 @@ -obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o +obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ + pat.o obj-$(CONFIG_X86_32) += pgtable_32.o diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 6cdfc0fd68be..f7d5ca170c22 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -773,14 +773,14 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages, int set_memory_uc(unsigned long addr, int numpages) { return change_page_attr_set(addr, numpages, - __pgprot(_PAGE_PCD)); + __pgprot(_PAGE_CACHE_UC)); } EXPORT_SYMBOL(set_memory_uc); int set_memory_wb(unsigned long addr, int numpages) { return change_page_attr_clear(addr, numpages, - __pgprot(_PAGE_PCD | _PAGE_PWT)); + __pgprot(_PAGE_CACHE_MASK)); } EXPORT_SYMBOL(set_memory_wb); diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c new file mode 100644 index 000000000000..7cc71d868483 --- /dev/null +++ b/arch/x86/mm/pat.c @@ -0,0 +1,402 @@ +/* + * Handle caching attributes in page tables (PAT) + * + * Authors: Venkatesh Pallipadi + * Suresh B Siddha + * + * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int pat_wc_enabled = 1; + +static u64 __read_mostly boot_pat_state; + +static int nopat(char *str) +{ + pat_wc_enabled = 0; + printk(KERN_INFO "x86: PAT support disabled.\n"); + + return 0; +} +early_param("nopat", nopat); + +static int pat_known_cpu(void) +{ + if (!pat_wc_enabled) + return 0; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + (boot_cpu_data.x86 == 0xF || + (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15))) { + if (cpu_has_pat) { + return 1; + } + } + + pat_wc_enabled = 0; + printk(KERN_INFO "CPU and/or kernel does not support PAT.\n"); + return 0; +} + +enum { + PAT_UC = 0, /* uncached */ + PAT_WC = 1, /* Write combining */ + PAT_WT = 4, /* Write Through */ + PAT_WP = 5, /* Write Protected */ + PAT_WB = 6, /* Write Back (default) */ + PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ +}; + +#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8)) + +void pat_init(void) +{ + u64 pat; + +#ifndef CONFIG_X86_PAT + nopat(NULL); +#endif + + /* Boot CPU enables PAT based on CPU feature */ + if (!smp_processor_id() && !pat_known_cpu()) + return; + + /* APs enable PAT iff boot CPU has enabled it before */ + if (smp_processor_id() && !pat_wc_enabled) + return; + + /* Set PWT to Write-Combining. 
All other bits stay the same */ + /* + * PTE encoding used in Linux: + * PAT + * |PCD + * ||PWT + * ||| + * 000 WB _PAGE_CACHE_WB + * 001 WC _PAGE_CACHE_WC + * 010 UC- _PAGE_CACHE_UC_MINUS + * 011 UC _PAGE_CACHE_UC + * PAT bit unused + */ + pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) | + PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC); + + /* Boot CPU check */ + if (!smp_processor_id()) { + rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); + } + + wrmsrl(MSR_IA32_CR_PAT, pat); + printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", + smp_processor_id(), boot_pat_state, pat); +} + +#undef PAT + +static char *cattr_name(unsigned long flags) +{ + switch (flags & _PAGE_CACHE_MASK) { + case _PAGE_CACHE_UC: return "uncached"; + case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; + case _PAGE_CACHE_WB: return "write-back"; + case _PAGE_CACHE_WC: return "write-combining"; + default: return "broken"; + } +} + +/* + * The global memtype list keeps track of memory type for specific + * physical memory areas. Conflicting memory types in different + * mappings can cause CPU cache corruption. To avoid this we keep track. + * + * The list is sorted based on starting address and can contain multiple + * entries for each address (this allows reference counting for overlapping + * areas). All the aliases have the same cache attributes of course. + * Zero attributes are represented as holes. + * + * Currently the data structure is a list because the number of mappings + * are expected to be relatively small. If this should be a problem + * it could be changed to a rbtree or similar. + * + * memtype_lock protects the whole list. + */ + +struct memtype { + u64 start; + u64 end; + unsigned long type; + struct list_head nd; +}; + +static LIST_HEAD(memtype_list); +static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ + +/* + * Does intersection of PAT memory type and MTRR memory type and returns + * the resulting memory type as PAT understands it. + * (Type in pat and mtrr will not have same value) + * The intersection is based on "Effective Memory Type" tables in IA-32 + * SDM vol 3a + */ +static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot, + unsigned long *ret_prot) +{ + unsigned long pat_type; + u8 mtrr_type; + + mtrr_type = mtrr_type_lookup(start, end); + if (mtrr_type == 0xFF) { /* MTRR not enabled */ + *ret_prot = prot; + return 0; + } + if (mtrr_type == 0xFE) { /* MTRR match error */ + *ret_prot = _PAGE_CACHE_UC; + return -1; + } + if (mtrr_type != MTRR_TYPE_UNCACHABLE && + mtrr_type != MTRR_TYPE_WRBACK && + mtrr_type != MTRR_TYPE_WRCOMB) { /* MTRR type unhandled */ + *ret_prot = _PAGE_CACHE_UC; + return -1; + } + + pat_type = prot & _PAGE_CACHE_MASK; + prot &= (~_PAGE_CACHE_MASK); + + /* Currently doing intersection by hand. Optimize it later. 
*/ + if (pat_type == _PAGE_CACHE_WC) { + *ret_prot = prot | _PAGE_CACHE_WC; + } else if (pat_type == _PAGE_CACHE_UC_MINUS) { + *ret_prot = prot | _PAGE_CACHE_UC_MINUS; + } else if (pat_type == _PAGE_CACHE_UC || + mtrr_type == MTRR_TYPE_UNCACHABLE) { + *ret_prot = prot | _PAGE_CACHE_UC; + } else if (mtrr_type == MTRR_TYPE_WRCOMB) { + *ret_prot = prot | _PAGE_CACHE_WC; + } else { + *ret_prot = prot | _PAGE_CACHE_WB; + } + + return 0; +} + +int reserve_memtype(u64 start, u64 end, unsigned long req_type, + unsigned long *ret_type) +{ + struct memtype *new_entry = NULL; + struct memtype *parse; + unsigned long actual_type; + int err = 0; + + /* Only track when pat_wc_enabled */ + if (!pat_wc_enabled) { + if (ret_type) + *ret_type = req_type; + + return 0; + } + + /* Low ISA region is always mapped WB in page table. No need to track */ + if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) { + if (ret_type) + *ret_type = _PAGE_CACHE_WB; + + return 0; + } + + req_type &= _PAGE_CACHE_MASK; + err = pat_x_mtrr_type(start, end, req_type, &actual_type); + if (err) { + if (ret_type) + *ret_type = actual_type; + + return -EINVAL; + } + + new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + + new_entry->start = start; + new_entry->end = end; + new_entry->type = actual_type; + + if (ret_type) + *ret_type = actual_type; + + spin_lock(&memtype_lock); + + /* Search for existing mapping that overlaps the current range */ + list_for_each_entry(parse, &memtype_list, nd) { + struct memtype *saved_ptr; + + if (parse->start >= end) { + list_add(&new_entry->nd, parse->nd.prev); + new_entry = NULL; + break; + } + + if (start <= parse->start && end >= parse->start) { + if (actual_type != parse->type && ret_type) { + actual_type = parse->type; + *ret_type = actual_type; + new_entry->type = actual_type; + } + + if (actual_type != parse->type) { + printk( + KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", + current->comm, current->pid, + start, end, + cattr_name(actual_type), + cattr_name(parse->type)); + err = -EBUSY; + break; + } + + saved_ptr = parse; + /* + * Check to see whether the request overlaps more + * than one entry in the list + */ + list_for_each_entry_continue(parse, &memtype_list, nd) { + if (end <= parse->start) { + break; + } + + if (actual_type != parse->type) { + printk( + KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", + current->comm, current->pid, + start, end, + cattr_name(actual_type), + cattr_name(parse->type)); + err = -EBUSY; + break; + } + } + + if (err) { + break; + } + + /* No conflict. 
Go ahead and add this new entry */ + list_add(&new_entry->nd, saved_ptr->nd.prev); + new_entry = NULL; + break; + } + + if (start < parse->end) { + if (actual_type != parse->type && ret_type) { + actual_type = parse->type; + *ret_type = actual_type; + new_entry->type = actual_type; + } + + if (actual_type != parse->type) { + printk( + KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", + current->comm, current->pid, + start, end, + cattr_name(actual_type), + cattr_name(parse->type)); + err = -EBUSY; + break; + } + + saved_ptr = parse; + /* + * Check to see whether the request overlaps more + * than one entry in the list + */ + list_for_each_entry_continue(parse, &memtype_list, nd) { + if (end <= parse->start) { + break; + } + + if (actual_type != parse->type) { + printk( + KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", + current->comm, current->pid, + start, end, + cattr_name(actual_type), + cattr_name(parse->type)); + err = -EBUSY; + break; + } + } + + if (err) { + break; + } + + /* No conflict. Go ahead and add this new entry */ + list_add(&new_entry->nd, &saved_ptr->nd); + new_entry = NULL; + break; + } + } + + if (err) { + kfree(new_entry); + spin_unlock(&memtype_lock); + return err; + } + + if (new_entry) { + /* No conflict. Not yet added to the list. Add to the tail */ + list_add_tail(&new_entry->nd, &memtype_list); + } + + spin_unlock(&memtype_lock); + return err; +} + +int free_memtype(u64 start, u64 end) +{ + struct memtype *ml; + int err = -EINVAL; + + /* Only track when pat_wc_enabled */ + if (!pat_wc_enabled) { + return 0; + } + + /* Low ISA region is always mapped WB. No need to track */ + if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) { + return 0; + } + + spin_lock(&memtype_lock); + list_for_each_entry(ml, &memtype_list, nd) { + if (ml->start == start && ml->end == end) { + list_del(&ml->nd); + kfree(ml); + err = 0; + break; + } + } + spin_unlock(&memtype_lock); + + if (err) { + printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n", + current->comm, current->pid, start, end); + } + return err; +} + diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 90feb6f2562c..0d609c837a41 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h @@ -186,6 +186,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) +#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 3ed97144c07b..af4e07f661b8 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -57,6 +57,8 @@ #define MSR_MTRRfix4K_F8000 0x0000026f #define MSR_MTRRdefType 0x000002ff +#define MSR_IA32_CR_PAT 0x00000277 + #define MSR_IA32_DEBUGCTLMSR 0x000001d9 #define MSR_IA32_LASTBRANCHFROMIP 0x000001db #define MSR_IA32_LASTBRANCHTOIP 0x000001dc diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 319d065800be..968794af93f9 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -84,6 +84,8 @@ struct mtrr_gentry #ifdef __KERNEL__ +extern u8 mtrr_type_lookup(u64 addr, u64 end); + /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR extern void mtrr_save_fixed_ranges(void *); diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h new file mode 100644 
index 000000000000..8b822b5a1786 --- /dev/null +++ b/include/asm-x86/pat.h @@ -0,0 +1,16 @@ + +#ifndef _ASM_PAT_H +#define _ASM_PAT_H 1 + +#include + +extern int pat_wc_enabled; + +extern void pat_init(void); + +extern int reserve_memtype(u64 start, u64 end, + unsigned long req_type, unsigned long *ret_type); +extern int free_memtype(u64 start, u64 end); + +#endif + diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 9cf472aeb9ce..ca6deb3de7c0 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -57,6 +57,12 @@ #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) +#define _PAGE_CACHE_WB (0) +#define _PAGE_CACHE_WC (_PAGE_PWT) +#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) +#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) + #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -- cgit v1.2.1 From 55c626820a82b25d7fceca702e9422037ae80626 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 26 Mar 2008 06:19:45 +0100 Subject: x86: revert ucminus change Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 3d0a589d92c4..df95d1d6b4df 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -159,11 +159,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, switch (mode) { case IOR_MODE_UNCACHED: default: - /* - * FIXME: we will use UC MINUS for now, as video fb drivers - * depend on it. Upcoming ioremap_wc() will fix this behavior. - */ - prot = PAGE_KERNEL_UC_MINUS; + prot = PAGE_KERNEL_NOCACHE; break; case IOR_MODE_CACHED: prot = PAGE_KERNEL; -- cgit v1.2.1 From 3a96ce8cac808fbed5493adc5c605bced28e2ca1 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:16 -0700 Subject: x86: PAT make ioremap_change_attr non-static Make ioremap_change_attr() non-static and use prot_val in place of ioremap_mode. This interface is used in subsequent PAT patches. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 29 ++++++++++++----------------- include/asm-x86/io.h | 3 +++ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index df95d1d6b4df..2ac09a5822cb 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -20,11 +20,6 @@ #include #include -enum ioremap_mode { - IOR_MODE_UNCACHED, - IOR_MODE_CACHED, -}; - #ifdef CONFIG_X86_64 unsigned long __phys_addr(unsigned long x) @@ -90,18 +85,18 @@ int page_is_ram(unsigned long pagenr) * Fix up the linear direct mapping of the kernel to avoid cache attribute * conflicts. */ -static int ioremap_change_attr(unsigned long vaddr, unsigned long size, - enum ioremap_mode mode) +int ioremap_change_attr(unsigned long vaddr, unsigned long size, + unsigned long prot_val) { unsigned long nrpages = size >> PAGE_SHIFT; int err; - switch (mode) { - case IOR_MODE_UNCACHED: + switch (prot_val) { + case _PAGE_CACHE_UC: default: err = set_memory_uc(vaddr, nrpages); break; - case IOR_MODE_CACHED: + case _PAGE_CACHE_WB: err = set_memory_wb(vaddr, nrpages); break; } @@ -119,7 +114,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size, * caller shouldn't need to know that small detail. 
*/ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, - enum ioremap_mode mode) + unsigned long prot_val) { unsigned long pfn, offset, last_addr, vaddr; struct vm_struct *area; @@ -156,12 +151,12 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, WARN_ON_ONCE(is_ram); } - switch (mode) { - case IOR_MODE_UNCACHED: + switch (prot_val) { + case _PAGE_CACHE_UC: default: prot = PAGE_KERNEL_NOCACHE; break; - case IOR_MODE_CACHED: + case _PAGE_CACHE_WB: prot = PAGE_KERNEL; break; } @@ -186,7 +181,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, return NULL; } - if (ioremap_change_attr(vaddr, size, mode) < 0) { + if (ioremap_change_attr(vaddr, size, prot_val) < 0) { vunmap(area->addr); return NULL; } @@ -217,13 +212,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, */ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) { - return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); + return __ioremap(phys_addr, size, _PAGE_CACHE_UC); } EXPORT_SYMBOL(ioremap_nocache); void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) { - return __ioremap(phys_addr, size, IOR_MODE_CACHED); + return __ioremap(phys_addr, size, _PAGE_CACHE_WB); } EXPORT_SYMBOL(ioremap_cache); diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 5a58b176dd61..6fa150fa68fa 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -3,3 +3,6 @@ #else # include "io_64.h" #endif +extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, + unsigned long prot_val); + -- cgit v1.2.1 From d7677d4034f040f4ce565713e0b83a31cc26f23e Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:17 -0700 Subject: x86: PAT use reserve free memtype in ioremap and iounmap Use reserve_memtype and free_memtype interfaces in ioremap/iounmap to avoid aliasing. If there is an existing alias for the region, inherit the memory type from the alias. If there are conflicting aliases for the entire region, then fail ioremap. 
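In driver terms, the new behaviour looks roughly like this (a sketch; the device address and helper names are hypothetical):

    #include <asm/io.h>

    static void __iomem *regs;

    static int widget_init(void)
    {
        /* __ioremap() now calls reserve_memtype() on the physical
         * range; if an incompatible alias already exists and cannot
         * be inherited, the mapping attempt fails with NULL. */
        regs = ioremap_nocache(0xfd000000, 0x1000);
        if (!regs)
            return -EBUSY;
        return 0;
    }

    static void widget_exit(void)
    {
        /* iounmap() pairs with free_memtype(), releasing the range so
         * later users may map it with a different attribute. */
        iounmap(regs);
    }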
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 2ac09a5822cb..20c01f2b2e11 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 @@ -118,6 +119,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, { unsigned long pfn, offset, last_addr, vaddr; struct vm_struct *area; + unsigned long new_prot_val; pgprot_t prot; /* Don't allow wraparound or zero size */ @@ -151,6 +153,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, WARN_ON_ONCE(is_ram); } + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr+1) - phys_addr; + + if (reserve_memtype(phys_addr, phys_addr + size, + prot_val, &new_prot_val)) { + /* + * Do not fallback to certain memory types with certain + * requested type: + * - request is uncached, return cannot be write-back + */ + if ((prot_val == _PAGE_CACHE_UC && + new_prot_val == _PAGE_CACHE_WB)) { + free_memtype(phys_addr, phys_addr + size); + return NULL; + } + prot_val = new_prot_val; + } + switch (prot_val) { case _PAGE_CACHE_UC: default: @@ -161,13 +185,6 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, break; } - /* - * Mappings have to be page-aligned - */ - offset = phys_addr & ~PAGE_MASK; - phys_addr &= PAGE_MASK; - size = PAGE_ALIGN(last_addr+1) - phys_addr; - /* * Ok, go for it.. */ @@ -177,11 +194,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, area->phys_addr = phys_addr; vaddr = (unsigned long) area->addr; if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { + free_memtype(phys_addr, phys_addr + size); free_vm_area(area); return NULL; } if (ioremap_change_attr(vaddr, size, prot_val) < 0) { + free_memtype(phys_addr, phys_addr + size); vunmap(area->addr); return NULL; } @@ -265,6 +284,8 @@ void iounmap(volatile void __iomem *addr) return; } + free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); + /* Finally remove it */ o = remove_vm_area((void *)addr); BUG_ON(p != o || o == NULL); -- cgit v1.2.1 From 1219333dfdd488e85f08cf07881b8bc63cf92f21 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:18 -0700 Subject: x86: PAT use reserve free memtype in set_memory_uc Use reserve_memtype and free_memtype interfaces in set_memory_uc/set_memory_wb interfaces to avoid aliasing. Usage model of set_memory_uc and set_memory_wb is for RAM memory and users will first call set_memory_uc and call set_memory_wb after use to reset the attribute. 
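A sketch of the pairing this usage model expects from drivers (the buffer helper is hypothetical; set_memory_uc/set_memory_wb are the interfaces changed below):

    #include <asm/cacheflush.h>

    /* vaddr/pages describe a kernel RAM buffer, e.g. from __get_free_pages() */
    static int with_uncached_buffer(unsigned long vaddr, int pages)
    {
        int err;

        err = set_memory_uc(vaddr, pages);  /* reserve memtype + flip to UC */
        if (err)
            return err;

        /* ... let the device work on the buffer uncached ... */

        return set_memory_wb(vaddr, pages); /* free memtype + restore WB */
    }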
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 4 ++-- arch/x86/mm/pageattr.c | 21 +++++++++++++++++++-- include/asm-x86/cacheflush.h | 2 ++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 20c01f2b2e11..0cdb7f11ce49 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -95,10 +95,10 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, switch (prot_val) { case _PAGE_CACHE_UC: default: - err = set_memory_uc(vaddr, nrpages); + err = _set_memory_uc(vaddr, nrpages); break; case _PAGE_CACHE_WB: - err = set_memory_wb(vaddr, nrpages); + err = _set_memory_wb(vaddr, nrpages); break; } diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index f7d5ca170c22..938df5e8402b 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -19,6 +19,7 @@ #include #include #include +#include /* * The current flushing context - we pass it instead of 5 arguments: @@ -770,18 +771,34 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages, return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask); } -int set_memory_uc(unsigned long addr, int numpages) +int _set_memory_uc(unsigned long addr, int numpages) { return change_page_attr_set(addr, numpages, __pgprot(_PAGE_CACHE_UC)); } + +int set_memory_uc(unsigned long addr, int numpages) +{ + if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, + _PAGE_CACHE_UC, NULL)) + return -EINVAL; + + return _set_memory_uc(addr, numpages); +} EXPORT_SYMBOL(set_memory_uc); -int set_memory_wb(unsigned long addr, int numpages) +int _set_memory_wb(unsigned long addr, int numpages) { return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_CACHE_MASK)); } + +int set_memory_wb(unsigned long addr, int numpages) +{ + free_memtype(addr, addr + numpages * PAGE_SIZE); + + return _set_memory_wb(addr, numpages); +} EXPORT_SYMBOL(set_memory_wb); int set_memory_x(unsigned long addr, int numpages) diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5396c212d8c0..5676fba10a09 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -34,6 +34,8 @@ int set_pages_nx(struct page *page, int numpages); int set_pages_ro(struct page *page, int numpages); int set_pages_rw(struct page *page, int numpages); +int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wb(unsigned long addr, int numpages); int set_memory_uc(unsigned long addr, int numpages); int set_memory_wb(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); -- cgit v1.2.1 From 03d72aa18f15df9987fe5837284e15b9ccf6e3f8 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:19 -0700 Subject: x86: PAT use reserve free memtype in pci_mmap_page_range Add reserve_memtype and free_memtype wrapper for pci_mmap_page_range. Free is called on unmap, but identity map continues to be mapped as per pci_mmap_page_range request, until next request for the same region calls ioremap_change_attr(), which will go through without conflict. This way of mapping is identical to one used in ioremap/iounmap. 
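From user space this path is reached by mmap()ing a PCI resource file in sysfs; a hypothetical sketch (device path invented, error handling minimal):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* resource0 maps uncached; per the pat.txt table, a prefetchable
         * BAR exposed as resource0_wc maps write-combined instead. */
        int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
        volatile unsigned int *bar;

        if (fd < 0)
            return 1;
        bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (bar == MAP_FAILED)
            return 1;
        bar[0] = 1;  /* reaches the device with the PAT-chosen type */
        munmap((void *)bar, 4096);
        close(fd);
        return 0;
    }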
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/pci/i386.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 103b9dff1213..4ebf52f6b1fd 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -30,6 +30,9 @@ #include #include #include +#include + +#include #include "pci.h" @@ -297,10 +300,34 @@ void pcibios_set_master(struct pci_dev *dev) pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); } +static void pci_unmap_page_range(struct vm_area_struct *vma) +{ + u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; + free_memtype(addr, addr + vma->vm_end - vma->vm_start); +} + +static void pci_track_mmap_page_range(struct vm_area_struct *vma) +{ + u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; + unsigned long flags = pgprot_val(vma->vm_page_prot) + & _PAGE_CACHE_MASK; + + reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL); +} + +static struct vm_operations_struct pci_mmap_ops = { + .open = pci_track_mmap_page_range, + .close = pci_unmap_page_range, +}; + int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { unsigned long prot; + u64 addr = vma->vm_pgoff << PAGE_SHIFT; + unsigned long len = vma->vm_end - vma->vm_start; + unsigned long flags; + unsigned long new_flags; /* I/O space cannot be accessed via normal processor loads and * stores on this platform. @@ -308,21 +335,46 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, if (mmap_state == pci_mmap_io) return -EINVAL; - /* Leave vm_pgoff as-is, the PCI space address is the physical - * address on this platform. - */ prot = pgprot_val(vma->vm_page_prot); - if (boot_cpu_data.x86 > 3) - prot |= _PAGE_PCD | _PAGE_PWT; + if (pat_wc_enabled && write_combine) + prot |= _PAGE_CACHE_WC; + else if (boot_cpu_data.x86 > 3) + prot |= _PAGE_CACHE_UC; + vma->vm_page_prot = __pgprot(prot); - /* Write-combine setting is ignored, it is changed via the mtrr - * interfaces on this platform. - */ + flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK; + if (reserve_memtype(addr, addr + len, flags, &new_flags)) { + /* + * Do not fallback to certain memory types with certain + * requested type: + * - request is uncached, return cannot be write-back + * - request is uncached, return cannot be write-combine + * - request is write-combine, return cannot be write-back + */ + if ((flags == _PAGE_CACHE_UC && + (new_flags == _PAGE_CACHE_WB || + new_flags == _PAGE_CACHE_WC)) || + (flags == _PAGE_CACHE_WC && + new_flags == _PAGE_CACHE_WB)) { + free_memtype(addr, addr+len); + return -EINVAL; + } + flags = new_flags; + } + + if (vma->vm_pgoff <= max_pfn_mapped && + ioremap_change_attr((unsigned long)__va(addr), len, flags)) { + free_memtype(addr, addr + len); + return -EINVAL; + } + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; + vma->vm_ops = &pci_mmap_ops; + return 0; } -- cgit v1.2.1 From ef354af4629e5cc76a3f64fc46d452f2b56d5a59 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:23 -0700 Subject: x86: PAT add set_memory_wc() interface Add a set_memory_wc() interface, similar to the set_memory_uc() interface. Callers have to use set_memory_uc/set_memory_wb and set_memory_wc/set_memory_wb as pairs.
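The write-combining variant of the same pairing, as a sketch (hypothetical helper; the UC fallback matches the !pat_wc_enabled branch in the hunk below):

    static int ring_set_wc(unsigned long vaddr, int pages)
    {
        int err;

        /* On non-PAT systems set_memory_wc() transparently degrades
         * to set_memory_uc(), so the caller's pairing is unchanged. */
        err = set_memory_wc(vaddr, pages);
        if (err)
            return err;

        /* ... stream descriptors through the WC mapping ... */

        return set_memory_wb(vaddr, pages); /* undo before freeing */
    }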
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 19 +++++++++++++++++++ include/asm-x86/cacheflush.h | 2 ++ 2 files changed, 21 insertions(+) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 938df5e8402b..270cab2e6030 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -787,6 +787,25 @@ int set_memory_uc(unsigned long addr, int numpages) } EXPORT_SYMBOL(set_memory_uc); +int _set_memory_wc(unsigned long addr, int numpages) +{ + return change_page_attr_set(addr, numpages, + __pgprot(_PAGE_CACHE_WC)); +} + +int set_memory_wc(unsigned long addr, int numpages) +{ + if (!pat_wc_enabled) + return set_memory_uc(addr, numpages); + + if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, + _PAGE_CACHE_WC, NULL)) + return -EINVAL; + + return _set_memory_wc(addr, numpages); +} +EXPORT_SYMBOL(set_memory_wc); + int _set_memory_wb(unsigned long addr, int numpages) { return change_page_attr_clear(addr, numpages, diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5676fba10a09..90437d3f7610 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -35,8 +35,10 @@ int set_pages_ro(struct page *page, int numpages); int set_pages_rw(struct page *page, int numpages); int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wc(unsigned long addr, int numpages); int _set_memory_wb(unsigned long addr, int numpages); int set_memory_uc(unsigned long addr, int numpages); +int set_memory_wc(unsigned long addr, int numpages); int set_memory_wb(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); -- cgit v1.2.1 From b310f381d220b2c6e3fab16e8c6e4ca13eea75b2 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:24 -0700 Subject: x86: PAT add ioremap_wc() interface Introduce ioremap_wc for wc remap. 
(generic wrapper is in a later patch) Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 30 ++++++++++++++++++++++++++++++ include/asm-x86/io.h | 3 +++ include/asm-x86/pgtable.h | 2 ++ 3 files changed, 35 insertions(+) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0cdb7f11ce49..51cd3956c564 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -97,6 +97,9 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, default: err = _set_memory_uc(vaddr, nrpages); break; + case _PAGE_CACHE_WC: + err = _set_memory_wc(vaddr, nrpages); + break; case _PAGE_CACHE_WB: err = _set_memory_wb(vaddr, nrpages); break; @@ -166,8 +169,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, * Do not fallback to certain memory types with certain * requested type: * - request is uncached, return cannot be write-back + * - request is uncached, return cannot be write-combine + * - request is write-combine, return cannot be write-back */ if ((prot_val == _PAGE_CACHE_UC && + (new_prot_val == _PAGE_CACHE_WB || + new_prot_val == _PAGE_CACHE_WC)) || + (prot_val == _PAGE_CACHE_WC && new_prot_val == _PAGE_CACHE_WB)) { free_memtype(phys_addr, phys_addr + size); return NULL; @@ -180,6 +188,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, default: prot = PAGE_KERNEL_NOCACHE; break; + case _PAGE_CACHE_WC: + prot = PAGE_KERNEL_WC; + break; case _PAGE_CACHE_WB: prot = PAGE_KERNEL; break; @@ -235,6 +246,25 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) } EXPORT_SYMBOL(ioremap_nocache); +/** + * ioremap_wc - map memory into CPU space write combined + * @offset: bus address of the memory + * @size: size of the resource to map + * + * This version of ioremap ensures that the memory is marked write combining. + * Write combining allows faster writes to some hardware devices. + * + * Must be freed with iounmap. 
+ */ +void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) +{ + if (pat_wc_enabled) + return __ioremap(phys_addr, size, _PAGE_CACHE_WC); + else + return ioremap_nocache(phys_addr, size); +} +EXPORT_SYMBOL(ioremap_wc); + void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) { return __ioremap(phys_addr, size, _PAGE_CACHE_WB); diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 6fa150fa68fa..599cad3505c1 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -1,3 +1,5 @@ +#define ARCH_HAS_IOREMAP_WC + #ifdef CONFIG_X86_32 # include "io_32.h" #else @@ -5,4 +7,5 @@ #endif extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); +extern void __iomem * ioremap_wc(unsigned long offset, unsigned long size); diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index ca6deb3de7c0..e814cfe96af2 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -90,6 +90,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) @@ -107,6 +108,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) +#define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC) #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) -- cgit v1.2.1 From 6997ab4982a29925e79f72c3a59823cf944c3529 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Tue, 18 Mar 2008 17:00:25 -0700 Subject: x86: add PAT related debug prints Adds debug prints at critical code. Adds enough info in dmesg to allow us to do effective first round of analysis of any issues that may result due to PAT patch series. 
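[ Editorial aside: as a usage illustration for the write-combining interfaces introduced in the patches above, here is a minimal, hypothetical sketch of a driver mapping a framebuffer BAR through ioremap_wc(). The device, BAR index, and function names are invented for illustration and are not taken from any in-tree driver: ]

	#include <linux/pci.h>		/* pci_resource_start()/pci_resource_len() */
	#include <asm/io.h>		/* ioremap_wc(), iounmap() */

	static void __iomem *fb_base;

	static int example_map_framebuffer(struct pci_dev *pdev)
	{
		unsigned long start = pci_resource_start(pdev, 0);
		unsigned long len   = pci_resource_len(pdev, 0);

		/* WC mapping when PAT is usable; ioremap_wc() itself falls
		 * back to an uncached mapping when pat_wc_enabled is clear */
		fb_base = ioremap_wc(start, len);
		if (!fb_base)
			return -ENOMEM;
		return 0;
	}

	static void example_unmap_framebuffer(void)
	{
		iounmap(fb_base);	/* per the kernel-doc above: must be freed with iounmap */
	}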
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 6 +++++- arch/x86/mm/pat.c | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 51cd3956c564..6cd3418afe71 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -131,7 +131,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, return NULL; if (!phys_addr_valid(phys_addr)) { - printk(KERN_WARNING "ioremap: invalid physical address %lx\n", + printk(KERN_WARNING "ioremap: invalid physical address %llx\n", phys_addr); WARN_ON_ONCE(1); return NULL; @@ -177,6 +177,10 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, new_prot_val == _PAGE_CACHE_WC)) || (prot_val == _PAGE_CACHE_WC && new_prot_val == _PAGE_CACHE_WB)) { + printk( + "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", + phys_addr, phys_addr + size, + prot_val, new_prot_val); free_memtype(phys_addr, phys_addr + size); return NULL; } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 7cc71d868483..57a2af36d6ee 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -246,6 +246,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, struct memtype *saved_ptr; if (parse->start >= end) { + printk("New Entry\n"); list_add(&new_entry->nd, parse->nd.prev); new_entry = NULL; break; @@ -295,6 +296,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, break; } + printk("Overlap at 0x%Lx-0x%Lx\n", + saved_ptr->start, saved_ptr->end); /* No conflict. Go ahead and add this new entry */ list_add(&new_entry->nd, saved_ptr->nd.prev); new_entry = NULL; @@ -345,6 +348,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, break; } + printk("Overlap at 0x%Lx-0x%Lx\n", + saved_ptr->start, saved_ptr->end); /* No conflict. Go ahead and add this new entry */ list_add(&new_entry->nd, &saved_ptr->nd); new_entry = NULL; @@ -353,6 +358,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, } if (err) { + printk( + "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n", + start, end, cattr_name(new_entry->type), + cattr_name(req_type)); kfree(new_entry); spin_unlock(&memtype_lock); return err; @@ -361,6 +370,19 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, if (new_entry) { /* No conflict. Not yet added to the list. Add to the tail */ list_add_tail(&new_entry->nd, &memtype_list); + printk("New Entry\n"); + } + + if (ret_type) { + printk( + "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", + start, end, cattr_name(actual_type), + cattr_name(req_type), cattr_name(*ret_type)); + } else { + printk( + "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n", + start, end, cattr_name(actual_type), + cattr_name(req_type)); } spin_unlock(&memtype_lock); @@ -397,6 +419,8 @@ int free_memtype(u64 start, u64 end) printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n", current->comm, current->pid, start, end); } + + printk( "free_memtype request 0x%Lx-0x%Lx\n", start, end); return err; } -- cgit v1.2.1 From 042b78e4dbb8919a59e77ba5d502a5a14405dea1 Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Mon, 24 Mar 2008 14:22:35 -0700 Subject: x86: PAT infrastructure patch, documentation updates Fix double help section in PAT Kconfig. Thanks to Randy Dunlap for catching this bug. 
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5b46756e4c7c..629923126653 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1015,12 +1015,12 @@ config X86_PAT depends on MTRR && NONPROMISC_DEVMEM help Use PAT attributes to setup page level cache control. - ---help--- + PATs are the modern equivalents of MTRRs and are much more flexible than MTRRs. Say N here if you see bootup problems (boot crash, boot hang, - spontaneous reboots) or a non-working Xorg. + spontaneous reboots) or a non-working video driver. If unsure, say Y. -- cgit v1.2.1 From 52783fa8d6b847857fdd86df418e09c026d816f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 21 Mar 2008 15:42:28 +0100 Subject: x86: PAT fix build fix for !CONFIG_MTRR. Signed-off-by: Ingo Molnar --- include/asm-x86/mtrr.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 968794af93f9..ee172296e056 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -84,10 +84,9 @@ struct mtrr_gentry #ifdef __KERNEL__ -extern u8 mtrr_type_lookup(u64 addr, u64 end); - /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR +extern u8 mtrr_type_lookup(u64 addr, u64 end); extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); extern int mtrr_add (unsigned long base, unsigned long size, @@ -101,6 +100,13 @@ extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); # else +static inline u8 mtrr_type_lookup(u64 addr, u64 end) +{ + /* + * Return no-MTRRs: + */ + return 0xff; +} #define mtrr_save_fixed_ranges(arg) do {} while (0) #define mtrr_save_state() do {} while (0) static __inline__ int mtrr_add (unsigned long base, unsigned long size, -- cgit v1.2.1 From 35605a1027ac630f85a1b95684f7e86b82498cd6 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 24 Mar 2008 16:02:01 -0700 Subject: x86: enable PAT for amd k8 and fam10h make known_pat_cpu to think amd k8 and fam10h is ok too. also make tom2 below to be WRBACK Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mtrr/generic.c | 17 +++++++++++++++++ arch/x86/kernel/cpu/mtrr/main.c | 2 +- arch/x86/mm/pat.c | 6 ++++++ include/asm-x86/mtrr.h | 1 + 4 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 011e07e99cd1..74ec2ea4ed3e 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -37,6 +37,7 @@ static struct fixed_range_block fixed_range_blocks[] = { static unsigned long smp_changes_mask; static struct mtrr_state mtrr_state = {}; static int mtrr_state_set; +static u64 tom2; #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "mtrr." 
@@ -138,6 +139,11 @@ u8 mtrr_type_lookup(u64 start, u64 end) } } + if (tom2) { + if (start >= (1ULL<<32) && (end < tom2)) + return MTRR_TYPE_WRBACK; + } + if (prev_match != 0xFF) return prev_match; @@ -206,6 +212,15 @@ void __init get_mtrr_state(void) mtrr_state.def_type = (lo & 0xff); mtrr_state.enabled = (lo & 0xc00) >> 10; + if (amd_special_default_mtrr()) { + unsigned lo, hi; + /* TOP_MEM2 */ + rdmsr(MSR_K8_TOP_MEM2, lo, hi); + tom2 = hi; + tom2 <<= 32; + tom2 |= lo; + tom2 &= 0xffffff8000000ULL; + } if (mtrr_show) { int high_width; @@ -236,6 +251,8 @@ void __init get_mtrr_state(void) else printk(KERN_INFO "MTRR %u disabled\n", i); } + if (tom2) + printk(KERN_INFO "TOM2: %016lx aka %ldM\n", tom2, tom2>>20); } mtrr_state_set = 1; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index a6450b3ae759..6a1e278d9323 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -627,7 +627,7 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); #define Tom2Enabled (1U << 21) #define Tom2ForceMemTypeWB (1U << 22) -static __init int amd_special_default_mtrr(void) +int __init amd_special_default_mtrr(void) { u32 l, h; diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 57a2af36d6ee..0648a2225b09 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -47,6 +47,12 @@ static int pat_known_cpu(void) return 1; } } + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) { + if (cpu_has_pat) { + return 1; + } + } pat_wc_enabled = 0; printk(KERN_INFO "CPU and/or kernel does not support PAT.\n"); diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index ee172296e056..d3d26625aeaf 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -99,6 +99,7 @@ extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); extern int mtrr_trim_uncached_memory(unsigned long end_pfn); +extern int amd_special_default_mtrr(void); # else static inline u8 mtrr_type_lookup(u64 addr, u64 end) { -- cgit v1.2.1 From a7c7d0e91daebd7c5e51f9416d612b6a15e7e79a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 8 Apr 2008 16:25:42 +0200 Subject: x86: tom2 warning fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mtrr/generic.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 74ec2ea4ed3e..353efe4f5017 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -251,8 +251,10 @@ void __init get_mtrr_state(void) else printk(KERN_INFO "MTRR %u disabled\n", i); } - if (tom2) - printk(KERN_INFO "TOM2: %016lx aka %ldM\n", tom2, tom2>>20); + if (tom2) { + printk(KERN_INFO "TOM2: %016llx aka %lldM\n", + tom2, tom2>>20); + } } mtrr_state_set = 1; -- cgit v1.2.1 From 9307cacad0dfe3749f00303125c6f7f0523e5616 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 24 Mar 2008 23:24:34 -0700 Subject: x86: pat cpu feature bit setting for known cpus Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 25 +++++++++++++++++++++++++ arch/x86/kernel/setup_64.c | 7 +++++++ arch/x86/mm/pat.c | 15 ++------------- 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0dd87b8d6707..d999d7833bc2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -309,6 +309,19 @@ static void 
__cpuinit early_get_cap(struct cpuinfo_x86 *c) } + clear_cpu_cap(c, X86_FEATURE_PAT); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + case X86_VENDOR_INTEL: + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + } + } /* @@ -397,6 +410,18 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) init_scattered_cpuid_features(c); } + clear_cpu_cap(c, X86_FEATURE_PAT); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + case X86_VENDOR_INTEL: + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); + break; + } } static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 20034493b5ad..c6fe1e4bc7c2 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -962,12 +962,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) if (c->extended_cpuid_level >= 0x80000007) c->x86_power = cpuid_edx(0x80000007); + + clear_cpu_cap(c, X86_FEATURE_PAT); + switch (c->x86_vendor) { case X86_VENDOR_AMD: early_init_amd(c); + if (c->x86 >= 0xf && c->x86 <= 0x11) + set_cpu_cap(c, X86_FEATURE_PAT); break; case X86_VENDOR_INTEL: early_init_intel(c); + if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) + set_cpu_cap(c, X86_FEATURE_PAT); break; } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 0648a2225b09..72c0f6097402 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -40,19 +40,8 @@ static int pat_known_cpu(void) if (!pat_wc_enabled) return 0; - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - (boot_cpu_data.x86 == 0xF || - (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15))) { - if (cpu_has_pat) { - return 1; - } - } - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) { - if (cpu_has_pat) { - return 1; - } - } + if (cpu_has_pat) + return 1; pat_wc_enabled = 0; printk(KERN_INFO "CPU and/or kernel does not support PAT.\n"); -- cgit v1.2.1 From dee7cbb210fdd266ad81af4689bcbac3649f38ff Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Mon, 24 Mar 2008 14:39:55 -0700 Subject: x86: PAT bug fix for attribute type check after reserve_memtype Bug fixes for reserve_memtype() call in __ioremap and pci_mmap_page_range(). If reserve_memtype returns non-zero, then it is an error and subsequent free is not required. Requested and returned prot value check should be done when reserve_memtype returns success. 
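[ Editorial aside: the corrected calling convention, distilled from the ioremap hunk below into a standalone sketch; the snippet is schematic (it assumes the surrounding __ioremap()-style context) and the identifiers follow the patch: ]

	unsigned long new_prot_val;
	int retval;

	retval = reserve_memtype(phys_addr, phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval)
		return NULL;	/* failed: nothing was reserved, so no free_memtype() */

	if (prot_val != new_prot_val) {
		/* Only a *successful* reservation can hand back a different
		 * type; incompatible request/return pairs must call
		 * free_memtype() and bail out, as the hunk below does. */
	}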
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 11 +++++++++-- arch/x86/pci/i386.c | 7 ++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 6cd3418afe71..3f7f05e2c434 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -124,6 +124,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, struct vm_struct *area; unsigned long new_prot_val; pgprot_t prot; + int retval; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; @@ -163,8 +164,14 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; - if (reserve_memtype(phys_addr, phys_addr + size, - prot_val, &new_prot_val)) { + retval = reserve_memtype(phys_addr, phys_addr + size, + prot_val, &new_prot_val); + if (retval) { + printk("reserve_memtype returned %d\n", retval); + return NULL; + } + + if (prot_val != new_prot_val) { /* * Do not fallback to certain memory types with certain * requested type: diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 4ebf52f6b1fd..2ead72363077 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -328,6 +328,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, unsigned long len = vma->vm_end - vma->vm_start; unsigned long flags; unsigned long new_flags; + int retval; /* I/O space cannot be accessed via normal processor loads and * stores on this platform. @@ -344,7 +345,11 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, vma->vm_page_prot = __pgprot(prot); flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK; - if (reserve_memtype(addr, addr + len, flags, &new_flags)) { + retval = reserve_memtype(addr, addr + len, flags, &new_flags); + if (retval) + return retval; + + if (flags != new_flags) { /* * Do not fallback to certain memory types with certain * requested type: -- cgit v1.2.1 From b450e5e816d10893e17f57b3eb9d29c52635862a Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Tue, 25 Mar 2008 16:51:26 -0700 Subject: x86: PAT bug fix for attribute type check after reserve_memtype, debug Make the PAT related printks in ioremap pr_debug. 
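[ Editorial aside: pr_debug() costs nothing in ordinary builds — it only expands to a printk when the translation unit is compiled with DEBUG defined. Paraphrasing linux/kernel.h of this era (check the exact tree before relying on it): ]

	#ifdef DEBUG
	#define pr_debug(fmt, arg...) \
		printk(KERN_DEBUG fmt, ##arg)
	#else
	static inline int __attribute__ ((format (printf, 1, 2)))
	pr_debug(const char *fmt, ...)
	{
		return 0;
	}
	#endif

So the diagnostics added in the previous patch remain available to developers building with DEBUG, but no longer appear on every boot.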
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 3f7f05e2c434..7338c5d3dd37 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -167,7 +167,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, retval = reserve_memtype(phys_addr, phys_addr + size, prot_val, &new_prot_val); if (retval) { - printk("reserve_memtype returned %d\n", retval); + pr_debug("Warning: reserve_memtype returned %d\n", retval); return NULL; } @@ -184,7 +184,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, new_prot_val == _PAGE_CACHE_WC)) || (prot_val == _PAGE_CACHE_WC && new_prot_val == _PAGE_CACHE_WB)) { - printk( + pr_debug( "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", phys_addr, phys_addr + size, prot_val, new_prot_val); -- cgit v1.2.1 From cc7594e420283cf6070538655f1a4fbebb3fb853 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 18 Mar 2008 18:46:06 -0700 Subject: i386: arch/x86/math-emu/fpu_entry.c warning fix arch/x86/math-emu/fpu_entry.c:555: warning: 'entry_sel_off.empty' is used uninitialized in this function Presumably it's harmless, but I'll sleep better at night knowing that we initialised it. Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/math-emu/fpu_entry.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index 760baeea5f07..4bab3b145392 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -276,6 +276,7 @@ asmlinkage void math_emulate(long arg) entry_sel_off.offset = FPU_ORIG_EIP; entry_sel_off.selector = FPU_CS; entry_sel_off.opcode = (byte1 << 8) | FPU_modrm; + entry_sel_off.empty = 0; FPU_rm = FPU_modrm & 7; -- cgit v1.2.1 From f2e576b8141ec1cf2669ff7bbc92f0a9c147f34f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 18 Mar 2008 18:54:45 -0700 Subject: i386: arch/x86/math-emu/reg_ld_str.c: fix warning arch/x86/math-emu/reg_ld_str.c:380: warning: 'l[0]' may be used uninitialized in this function arch/x86/math-emu/reg_ld_str.c:380: warning: 'l[1]' may be used uninitialized in this function I can't actually spot the bug here. There's one obvious place, but fixing that didn't shut the warning up. Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/math-emu/reg_ld_str.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c index 799d4af5be66..02af772a24db 100644 --- a/arch/x86/math-emu/reg_ld_str.c +++ b/arch/x86/math-emu/reg_ld_str.c @@ -383,15 +383,15 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) int exp; FPU_REG tmp; + l[0] = 0; + l[1] = 0; if (st0_tag == TAG_Valid) { reg_copy(st0_ptr, &tmp); exp = exponent(&tmp); if (exp < DOUBLE_Emin) { /* It may be a denormal */ addexponent(&tmp, -DOUBLE_Emin + 52); /* largest exp to be 51 */ - - denormal_arg: - +denormal_arg: if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) { #ifdef PECULIAR_486 /* Did it round to a non-denormal ? 
*/ @@ -477,8 +477,7 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) /* This is a special case: see sec 16.2.5.1 of the 80486 book */ /* Overflow to infinity */ - l[0] = 0x00000000; /* Set to */ - l[1] = 0x7ff00000; /* + INF */ + l[1] = 0x7ff00000; /* Set to + INF */ } else { if (precision_loss) { if (increment) @@ -492,8 +491,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) } } else if (st0_tag == TAG_Zero) { /* Number is zero */ - l[0] = 0; - l[1] = 0; } else if (st0_tag == TAG_Special) { st0_tag = FPU_Special(st0_ptr); if (st0_tag == TW_Denormal) { @@ -508,7 +505,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) reg_copy(st0_ptr, &tmp); goto denormal_arg; } else if (st0_tag == TW_Infinity) { - l[0] = 0; l[1] = 0x7ff00000; } else if (st0_tag == TW_NaN) { /* Is it really a NaN ? */ @@ -532,7 +528,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) EXCEPTION(EX_Invalid); if (!(control_word & CW_Invalid)) return 0; - l[0] = 0; l[1] = 0xfff80000; } } -- cgit v1.2.1 From 6e908947b4995bc0e551a8257c586d5c3e428201 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 21 Mar 2008 14:32:36 +0100 Subject: x86: fix ioapic bug again un-revert: commit 4960c9df1407c2723459c60ff13e6efe0c209c62 Author: Thomas Gleixner Date: Tue Jan 22 10:23:01 2008 +0100 Revert "x86: fix NMI watchdog & 'stopped time' problem" This reverts commit d4d25deca49ec2527a634557bf5a6cf449f85deb. needs a proper fix though ... Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 12 ++++++++++-- arch/x86/kernel/nmi_32.c | 9 +++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 742fab45e1c6..8ed6eb967652 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -2124,10 +2124,14 @@ static inline void __init check_timer(void) { int apic1, pin1, apic2, pin2; int vector; + unsigned int ver; unsigned long flags; local_irq_save(flags); + ver = apic_read(APIC_LVR); + ver = GET_APIC_VERSION(ver); + /* * get/set the timer IRQ vector: */ @@ -2140,11 +2144,15 @@ static inline void __init check_timer(void) * mode for the 8259A whenever interrupts are routed * through I/O APICs. Also IRQ0 has to be enabled in * the 8259A which implies the virtual wire has to be - * disabled in the local APIC. + * disabled in the local APIC. Finally timer interrupts + * need to be acknowledged manually in the 8259A for + * timer_interrupt() and for the i82489DX when using + * the NMI watchdog. */ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); init_8259A(1); - timer_ack = 1; + timer_ack = !cpu_has_tsc; + timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); if (timer_over_8254 > 0) enable_8259A_irq(0); diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 662e63e1cd57..8421d0ac6f22 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c @@ -26,6 +26,7 @@ #include #include +#include #include "mach_traps.h" @@ -81,7 +82,7 @@ int __init check_nmi_watchdog(void) prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); if (!prev_nmi_count) - return -1; + goto error; printk(KERN_INFO "Testing NMI watchdog ... 
"); @@ -118,7 +119,7 @@ int __init check_nmi_watchdog(void) if (!atomic_read(&nmi_active)) { kfree(prev_nmi_count); atomic_set(&nmi_active, -1); - return -1; + goto error; } printk("OK.\n"); @@ -129,6 +130,10 @@ int __init check_nmi_watchdog(void) kfree(prev_nmi_count); return 0; +error: + timer_ack = !cpu_has_tsc; + + return -1; } static int __init setup_nmi_watchdog(char *str) -- cgit v1.2.1 From 709f744f18ebc3a810d29c8d5502bf20c3cecc70 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 13 Mar 2008 09:08:51 +0000 Subject: x86: bitops asm constraint fixes This (simplified) piece of code didn't behave as expected due to incorrect constraints in some of the bitops functions, when X86_FEATURE_xxx is referring to other than the first long: int test(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_xxx)) clear_cpu_cap(c, X86_FEATURE_xxx); return cpu_has(c, X86_FEATURE_xxx); } I'd really like understand, though, what the policy of (not) having a "memory" clobber in these operations is - currently, this appears to be totally inconsistent. Also, many comments of the non-atomic functions say those may also be re-ordered - this contradicts the use of "asm volatile" in there, which again I'd like to understand. As much as all of these, using 'int' for the 'nr' parameter and 'void *' for the 'addr' one is in conflict with Documentation/atomic_ops.txt, especially because bt{,c,r,s} indeed take the bit index as signed (which hence would really need special precaution) and access the full 32 bits (if 'unsigned long' was used properly here, 64 bits for x86-64) pointed at, so invalid uses like referencing a 'char' array cannot currently be caught. Finally, the code with and without this patch relies heavily on the -fno-strict-aliasing compiler switch and I'm not certain this really is a good idea. In the light of all of this I'm sending this as RFC, as fixing the above might warrant a much bigger patch... Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- include/asm-x86/bitops.h | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1a23ce1a5697..7a76555b676a 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -24,9 +24,12 @@ /* Technically wrong, but this avoids compilation errors on some gcc versions. 
*/ #define ADDR "=m" (*(volatile long *) addr) +#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5]) #else #define ADDR "+m" (*(volatile long *) addr) +#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5]) #endif +#define BASE_ADDR "m" (*(volatile int *) addr) /** * set_bit - Atomically set a bit in memory @@ -79,9 +82,8 @@ static inline void __set_bit(int nr, volatile void *addr) */ static inline void clear_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btr %1,%0" - : ADDR - : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btr %1,%2" + : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -100,7 +102,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr) static inline void __clear_bit(int nr, volatile void *addr) { - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -135,7 +137,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) */ static inline void __change_bit(int nr, volatile void *addr) { - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -149,8 +151,8 @@ static inline void __change_bit(int nr, volatile void *addr) */ static inline void change_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btc %1,%0" - : ADDR : "Ir" (nr)); + asm volatile(LOCK_PREFIX "btc %1,%2" + : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -198,10 +200,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) { int oldbit; - asm("bts %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + asm volatile("bts %2,%3\n\t" + "sbb %0,%0" + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -238,10 +240,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr)); + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -250,10 +252,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) { int oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), BIT_ADDR + : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -288,10 +290,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr) { int oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%3\n\t" "sbb %0,%0" : "=r" (oldbit) - : "m" (*(unsigned long *)addr), "Ir" (nr)); + : "m" (((volatile const int *)addr)[nr >> 5]), + "Ir" (nr), BASE_ADDR); return oldbit; } @@ -310,6 +313,8 @@ static int test_bit(int nr, const volatile unsigned long *addr); constant_test_bit((nr),(addr)) : \ variable_test_bit((nr),(addr))) +#undef BASE_ADDR +#undef BIT_ADDR #undef ADDR #ifdef CONFIG_X86_32 -- cgit v1.2.1 From 8434e73d9ec2aeaa86389e362b960ffba5edd9c9 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 21 Mar 2008 08:31:57 -0500 Subject: x86: increase max physical memory size of 64-bit Increase the maximum physical address size of x86_64 system to 44-bits. This is in preparation for future chips that support larger physical memory sizes. 
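[ Editorial aside, in concrete terms: 2^40 bytes = 1 TiB, so the old MAX_PHYSMEM_BITS capped sparsemem's physical address space at 1 TiB; with 2^44 bytes = 16 TiB the new ceiling is sixteen times larger. ]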
Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/sparsemem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa58cd55411a..fa684f353aa7 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h @@ -26,8 +26,8 @@ # endif #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ -# define MAX_PHYSADDR_BITS 40 -# define MAX_PHYSMEM_BITS 40 +# define MAX_PHYSADDR_BITS 44 +# define MAX_PHYSMEM_BITS 44 #endif #endif /* CONFIG_SPARSEMEM */ -- cgit v1.2.1 From fa3f1f42b4efac84c459383218c20ab39e4df566 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 21 Mar 2008 08:34:25 -0500 Subject: x86: allow NODES_SHIFT to be a config option on x86_64 Allow the maximum number of nodes in an x86_64 system to be configurable. This patch does NOT change the default value but allows the value to be a config option. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 629923126653..e50e38e76d9f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -885,7 +885,7 @@ config NUMA_EMU number of nodes. This is only useful for debugging. config NODES_SHIFT - int + int "Max num nodes shift(1-15)" range 1 15 if X86_64 default "6" if X86_64 default "4" if X86_NUMAQ -- cgit v1.2.1 From 89bda4fccbd49f4b2bf59d0165391696037be856 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 21 Mar 2008 20:18:17 +0300 Subject: x86: use same index for processor maps Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 15265ee11f89..1b225cea5d7b 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -212,7 +212,6 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) return; } - cpu_set(num_processors, cpu_possible_map); num_processors++; cpus_complement(tmp_map, cpu_present_map); cpu = first_cpu(tmp_map); @@ -251,12 +250,13 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; cpu_to_apicid[cpu] = m->mpc_apicid; - bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; + bios_cpu_apicid[cpu] = m->mpc_apicid; } else { per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; } #endif + cpu_set(cpu, cpu_possible_map); cpu_set(cpu, cpu_present_map); } -- cgit v1.2.1 From 288621e32a7ae3a29c37b45297136c0264e2ff7b Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Fri, 21 Mar 2008 23:12:14 +0300 Subject: x86: relocate_kernel - use predefined PAGE_SIZE instead of own alias This patch does clean up relocate_kernel_(32|64).S a bit by getting rid of local PAGE_ALIGNED macro. 
We should use well-known PAGE_SIZE instead Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/relocate_kernel_32.S | 3 +-- arch/x86/kernel/relocate_kernel_64.S | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index f151d6fae462..ec4e6a0e0c2e 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -15,12 +15,11 @@ */ #define PTR(x) (x << 2) -#define PAGE_ALIGNED (1 << PAGE_SHIFT) #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ #define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */ .text - .align PAGE_ALIGNED + .align PAGE_SIZE .globl relocate_kernel relocate_kernel: movl 8(%esp), %ebp /* list of pages */ diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 14e95872c6a3..e252c0ed9539 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -15,11 +15,10 @@ */ #define PTR(x) (x << 3) -#define PAGE_ALIGNED (1 << PAGE_SHIFT) #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ .text - .align PAGE_ALIGNED + .align PAGE_SIZE .code64 .globl relocate_kernel relocate_kernel: -- cgit v1.2.1 From 9b967106da0357ef8b08847dce35584a04134f20 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 21 Mar 2008 15:14:07 -0700 Subject: x86: fix smpboot integration > yhlu@mpk:~/xx/xx/kernel/x86/linux-2.6> git-bisect bad > d1c707188ad646c8094cac9afb1738e7d0196ff2 is first bad commit > commit d1c707188ad646c8094cac9afb1738e7d0196ff2 > Author: Glauber de Oliveira Costa > Date: Wed Mar 19 14:25:53 2008 -0300 > > x86: include mach_apic.h in smpboot_64.c and smpboot.c > > After the inclusion, a lot of files needs fixing for conflicts, > some of them in the headers themselves, to accomodate for both > i386 and x86_64 versions. > > [ mingo@elte.hu: build fix ] > > Signed-off-by: Glauber Costa > Signed-off-by: Ingo Molnar > > :040000 040000 19f574e64bb8003bbe984f3a8c1315db969dfdcd > 6ffe96588c77bc936705599fa110107856201115 M arch > :040000 040000 61269347ad4f384ed85cc87c4f2d004ed94492ac > 8f5c713da25579a3cdf63db3d4c2f795261d0521 M include > yhlu@mpk:~/xx/xx/kernel/x86/linux-2.6> > attached patch fixes that. 
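[ Editorial aside: the shape of the mach_apicdef.h fix, schematically — the #ifdef previously closed before the inline, so both builds picked up the run-time, APIC-version-dependent helper; after the fix, 64-bit keeps a constant-shift macro and the inline lives wholly inside the 32-bit branch (code taken from the diff below): ]

	#ifdef CONFIG_X86_64
	#define GET_APIC_ID(x)	(((x)>>24)&0xFFu)	/* xAPIC layout, constant */
	#else
	static inline unsigned get_apic_id(unsigned long x)
	{
		unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
		if (APIC_XAPIC(ver))
			return (((x)>>24)&0xFF);	/* xAPIC: 8-bit ID */
		else
			return (((x)>>24)&0xF);		/* pre-xAPIC: 4-bit ID */
	}
	#define GET_APIC_ID(x)	get_apic_id(x)
	#endif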
--- include/asm-x86/mach-default/mach_apicdef.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index 7b78275e6d33..e4b29ba37de6 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h @@ -5,13 +5,12 @@ #ifdef CONFIG_X86_64 #define APIC_ID_MASK (0xFFu<<24) +#define GET_APIC_ID(x) (((x)>>24)&0xFFu) #define SET_APIC_ID(x) (((x)<<24)) #else #define APIC_ID_MASK (0xF<<24) -#endif - static inline unsigned get_apic_id(unsigned long x) -{ +{ unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); if (APIC_XAPIC(ver)) return (((x)>>24)&0xFF); @@ -20,5 +19,6 @@ static inline unsigned get_apic_id(unsigned long x) } #define GET_APIC_ID(x) get_apic_id(x) +#endif #endif -- cgit v1.2.1 From c64df70793a9c344874eb4af19f85e0662d2d3ee Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 21 Mar 2008 18:56:19 -0700 Subject: x86: memtest bootparam Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- Documentation/kernel-parameters.txt | 5 +++++ arch/x86/Kconfig | 29 +++++++++++++++++++++++++++++ arch/x86/kernel/e820_64.c | 10 +++++----- arch/x86/mm/init_64.c | 24 +++++++++++++++++------- 4 files changed, 56 insertions(+), 12 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 43c527d72f2f..f9ea0803d5d6 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1147,6 +1147,11 @@ and is between 256 and 4096 characters. It is defined in the file or memmap=0x10000$0x18690000 + memtest= [KNL,X86_64] Enable memtest + Format: + range: 0,4 : pattern number + default : 0 + meye.*= [HW] Set MotionEye Camera parameters See Documentation/video4linux/meye.txt. diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e50e38e76d9f..a0d7406e8b37 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -382,6 +382,35 @@ config PARAVIRT endif +config MEMTEST_BOOTPARAM + bool "Memtest boot parameter" + depends on X86_64 + default y + help + This option adds a kernel parameter 'memtest', which allows memtest + to be disabled at boot. If this option is selected, memtest + functionality can be disabled with memtest=0 on the kernel + command line. The purpose of this option is to allow a single + kernel image to be distributed with memtest built in, but not + necessarily enabled. + + If you are unsure how to answer this question, answer Y. + +config MEMTEST_BOOTPARAM_VALUE + int "Memtest boot parameter default value (0-4)" + depends on MEMTEST_BOOTPARAM + range 0 4 + default 0 + help + This option sets the default value for the kernel parameter + 'memtest', which allows memtest to be disabled at boot. If this + option is set to 0 (zero), the memtest kernel parameter will + default to 0, disabling memtest at bootup. If this option is + set to 4, the memtest kernel parameter will default to 4, + enabling memtest at bootup, and use that as pattern number. + + If you are unsure how to answer this question, answer 0. 
+ config ACPI_SRAT def_bool y depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH) diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 9184e6437c4f..d6ada0833876 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -241,7 +241,9 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, /* * Find next free range after *start */ -unsigned long __init find_e820_area_size(unsigned long start, unsigned long *sizep, unsigned long align) +unsigned long __init find_e820_area_size(unsigned long start, + unsigned long *sizep, + unsigned long align) { int i; @@ -254,17 +256,15 @@ unsigned long __init find_e820_area_size(unsigned long start, unsigned long *siz continue; addr = round_up(ei->addr, align); ei_last = ei->addr + ei->size; -// printk(KERN_DEBUG "find_e820_area_size : e820 %d [%llx, %lx]\n", i, ei->addr, ei_last); if (addr < start) addr = round_up(start, align); -// printk(KERN_DEBUG "find_e820_area_size : 0 [%lx, %lx]\n", addr, ei_last); if (addr >= ei_last) continue; *sizep = ei_last - addr; - while (bad_addr_size(&addr, sizep, align) && addr+ *sizep <= ei_last) + while (bad_addr_size(&addr, sizep, align) && + addr + *sizep <= ei_last) ; last = addr + *sizep; -// printk(KERN_DEBUG "find_e820_area_size : 1 [%lx, %lx]\n", addr, last); if (last > ei_last) continue; return addr; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 52f54ee4559f..ae225c3ae9a8 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -427,7 +427,10 @@ static void __init init_gbpages(void) direct_gbpages = 0; } -static void __init memtest(unsigned long start_phys, unsigned long size, unsigned pattern) +#ifdef CONFIG_MEMTEST_BOOTPARAM + +static void __init memtest(unsigned long start_phys, unsigned long size, + unsigned pattern) { unsigned long i; unsigned long *start; @@ -486,11 +489,12 @@ static void __init memtest(unsigned long start_phys, unsigned long size, unsigne } -static int __initdata memtest_pattern; +static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE; + static int __init parse_memtest(char *arg) { if (arg) - memtest_pattern = simple_strtoul(arg, NULL, 0) + 1; + memtest_pattern = simple_strtoul(arg, NULL, 0); return 0; } @@ -501,8 +505,10 @@ static void __init early_memtest(unsigned long start, unsigned long end) unsigned long t_start, t_size; unsigned pattern; - if (memtest_pattern) - printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); + if (!memtest_pattern) + return; + + printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); for (pattern = 0; pattern < memtest_pattern; pattern++) { t_start = start; t_size = 0; @@ -523,9 +529,13 @@ static void __init early_memtest(unsigned long start, unsigned long end) t_start += t_size; } } - if (memtest_pattern) - printk(KERN_CONT "\n"); + printk(KERN_CONT "\n"); } +#else +static void __init early_memtest(unsigned long start, unsigned long end) +{ +} +#endif /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. 
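[ Editorial aside: usage of the new parameter, per the kernel-parameters.txt hunk above — e.g. on the kernel command line:

	memtest=4	run test patterns 0 through 3 over free RAM at early boot
	memtest=0	disable memtest even when CONFIG_MEMTEST_BOOTPARAM_VALUE
			builds it in enabled by default

note that parse_memtest() now takes the value as the pattern count itself, where the old code added one to it. ]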
-- cgit v1.2.1 From dcfe946520719943fabd3e5ed13813956e48e37c Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 15 Apr 2008 23:17:42 -0700 Subject: x86: fix memtest print out Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ae225c3ae9a8..210243e94d84 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -473,7 +473,7 @@ static void __init memtest(unsigned long start_phys, unsigned long size, last_bad += incr; } else { if (start_bad) { - printk(KERN_INFO " %016lxx bad mem addr %016lx - %016lx reserved\n", + printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved", val, start_bad, last_bad + incr); reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); } @@ -482,7 +482,7 @@ static void __init memtest(unsigned long start_phys, unsigned long size, } } if (start_bad) { - printk(KERN_INFO " %016lx bad mem addr %016lx - %016lx reserved\n", + printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved", val, start_bad, last_bad + incr); reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); } -- cgit v1.2.1 From d3463c5a66147bdd21b5865ea29fdca50ea28f7f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 4 Apr 2008 13:31:15 +0200 Subject: undo "x86: fix breakage of vSMP irq operations" revert: "x86: fix breakage of vSMP irq operations" the irqflags.h unification will solve this in a cleaner way. Signed-off-by: Ingo Molnar --- include/asm-x86/irqflags.h | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 0e2292483b35..92021c1ffa3a 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -70,26 +70,6 @@ static inline void raw_local_irq_restore(unsigned long flags) native_restore_fl(flags); } -#ifdef CONFIG_X86_VSMP - -/* - * Interrupt control for the VSMP architecture: - */ - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags = __raw_local_save_flags(); - raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); -} - -static inline void raw_local_irq_enable(void) -{ - unsigned long flags = __raw_local_save_flags(); - raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); -} - -#else - static inline void raw_local_irq_disable(void) { native_irq_disable(); @@ -100,8 +80,6 @@ static inline void raw_local_irq_enable(void) native_irq_enable(); } -#endif - /* * Used in the idle loop; sti takes one instruction cycle * to complete: @@ -159,17 +137,10 @@ static inline unsigned long __raw_local_irq_save(void) #define raw_local_irq_save(flags) \ do { (flags) = __raw_local_irq_save(); } while (0) -#ifdef CONFIG_X86_VSMP -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); -} -#else static inline int raw_irqs_disabled_flags(unsigned long flags) { return !(flags & X86_EFLAGS_IF); } -#endif static inline int raw_irqs_disabled(void) { -- cgit v1.2.1 From 8dbeeb24e4d64a8a4bb49d54a6e4d987b9d3b6fc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:36 -0700 Subject: include/asm-x86/acpi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/acpi.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 7a72d6aa50be..14411c9de46f 100644 --- 
a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h @@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock); */ #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ + : "=a"(q32), "=d"(r32) \ + : "r"(d32), \ "0"(n_lo), "1"(n_hi)) #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ asm("shrl $1,%2 ;" \ "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) + : "=r"(n_hi), "=r"(n_lo) \ + : "0"(n_hi), "1"(n_lo)) #ifdef CONFIG_ACPI extern int acpi_lapic; -- cgit v1.2.1 From 2ac1ea7ccd81f0383c6525e495a31d18fcac92db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:37 -0700 Subject: include/asm-x86/alternative.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/alternative.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d26416b5722c..1f6a9ca10126 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h @@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod); extern void alternatives_smp_switch(int smp); #else static inline void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) {} + void *locks, void *locks_end, + void *text, void *text_end) {} static inline void alternatives_smp_module_del(struct module *mod) {} static inline void alternatives_smp_switch(int smp) {} #endif /* CONFIG_SMP */ @@ -148,9 +148,8 @@ struct paravirt_patch_site; void apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *end); #else -static inline void -apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end) +static inline void apply_paravirt(struct paravirt_patch_site *start, + struct paravirt_patch_site *end) {} #define __parainstructions NULL #define __parainstructions_end NULL -- cgit v1.2.1 From b03aa8c6eb2d743ef4658b494dc1780e7740efc0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:38 -0700 Subject: include/asm-x86/a.out-core.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/a.out-core.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h index d2b6e11d3e97..714207a1c387 100644 --- a/include/asm-x86/a.out-core.h +++ b/include/asm-x86/a.out-core.h @@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->magic = CMAGIC; dump->start_code = 0; dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; + dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; + dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) + >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; dump->u_debugreg[0] = current->thread.debugreg0; @@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->u_debugreg[7] = current->thread.debugreg7; if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; + dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) + >> PAGE_SHIFT; dump->regs.bx = regs->bx; 
dump->regs.cx = regs->cx; @@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->regs.ds = (u16)regs->ds; dump->regs.es = (u16)regs->es; dump->regs.fs = (u16)regs->fs; - savesegment(gs,gs); + savesegment(gs, gs); dump->regs.orig_ax = regs->orig_ax; dump->regs.ip = regs->ip; dump->regs.cs = (u16)regs->cs; @@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->regs.sp = regs->sp; dump->regs.ss = (u16)regs->ss; - dump->u_fpvalid = dump_fpu (regs, &dump->i387); + dump->u_fpvalid = dump_fpu(regs, &dump->i387); } #endif /* CONFIG_X86_32 */ -- cgit v1.2.1 From 79a4a961ef92744dd8876963690a8beb56758457 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:39 -0700 Subject: include/asm-x86/apicdef.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/apicdef.h | 50 +++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 674a2280e21e..8b244683431b 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -14,10 +14,10 @@ #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF -#define GET_APIC_VERSION(x) ((x)&0xFFu) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) +#define GET_APIC_VERSION(x) ((x) & 0xFFu) +#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) #ifdef CONFIG_X86_32 -# define APIC_INTEGRATED(x) ((x)&0xF0u) +# define APIC_INTEGRATED(x) ((x) & 0xF0u) #else # define APIC_INTEGRATED(x) (1) #endif @@ -31,16 +31,16 @@ #define APIC_EIO_ACK 0x0 #define APIC_RRR 0xC0 #define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFFu<<24) -#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) +#define APIC_LDR_MASK (0xFFu << 24) +#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) +#define SET_APIC_LOGICAL_ID(x) (((x) << 24)) #define APIC_ALL_CPUS 0xFFu #define APIC_DFR 0xE0 #define APIC_DFR_CLUSTER 0x0FFFFFFFul #define APIC_DFR_FLAT 0xFFFFFFFFul #define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) +#define APIC_SPIV_FOCUS_DISABLED (1 << 9) +#define APIC_SPIV_APIC_ENABLED (1 << 8) #define APIC_ISR 0x100 #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. 
*/ #define APIC_TMR 0x180 @@ -76,27 +76,27 @@ #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 -#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_APIC_DEST_FIELD(x) ((x)<<24) +#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) +#define SET_APIC_DEST_FIELD(x) ((x) << 24) #define APIC_LVTT 0x320 #define APIC_LVTTHMR 0x330 #define APIC_LVTPC 0x340 #define APIC_LVT0 0x350 -#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) -#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) -#define SET_APIC_TIMER_BASE(x) (((x)<<18)) +#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) +#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) +#define SET_APIC_TIMER_BASE(x) (((x) << 18)) #define APIC_TIMER_BASE_CLKIN 0x0 #define APIC_TIMER_BASE_TMBASE 0x1 #define APIC_TIMER_BASE_DIV 0x2 -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) +#define APIC_LVT_TIMER_PERIODIC (1 << 17) +#define APIC_LVT_MASKED (1 << 16) +#define APIC_LVT_LEVEL_TRIGGER (1 << 15) +#define APIC_LVT_REMOTE_IRR (1 << 14) +#define APIC_INPUT_POLARITY (1 << 13) +#define APIC_SEND_PENDING (1 << 12) #define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) +#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) +#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 #define APIC_MODE_EXTINT 0x7 @@ -105,7 +105,7 @@ #define APIC_TMICT 0x380 #define APIC_TMCCT 0x390 #define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) +#define APIC_TDR_DIV_TMBASE (1 << 2) #define APIC_TDR_DIV_1 0xB #define APIC_TDR_DIV_2 0x0 #define APIC_TDR_DIV_4 0x1 @@ -115,14 +115,14 @@ #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA #define APIC_EILVT0 0x500 -#define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ +#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ #define APIC_EILVT_NR_AMD_10H 4 -#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) +#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) #define APIC_EILVT_MSG_FIX 0x0 #define APIC_EILVT_MSG_SMI 0x2 #define APIC_EILVT_MSG_NMI 0x4 #define APIC_EILVT_MSG_EXT 0x7 -#define APIC_EILVT_MASKED (1<<16) +#define APIC_EILVT_MASKED (1 << 16) #define APIC_EILVT1 0x510 #define APIC_EILVT2 0x520 #define APIC_EILVT3 0x530 -- cgit v1.2.1 From 3c311febfa8cc240e2922931d7403a6bb7f3fa1b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:40 -0700 Subject: include/asm-x86/apic.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/apic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index b11d6524fb13..be9639a9a186 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h @@ -67,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v) static inline void native_apic_write_atomic(unsigned long reg, u32 v) { - (void) xchg((u32*)(APIC_BASE + reg), v); + (void)xchg((u32 *)(APIC_BASE + reg), v); } static inline u32 native_apic_read(unsigned long reg) -- cgit v1.2.1 From 78ff12eec42a4141d22dac4fdab04994384f6385 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:41 -0700 Subject: include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: 
Ingo Molnar --- include/asm-x86/atomic_32.h | 143 +++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 75 deletions(-) diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h index 437aac801711..21a4825148c0 100644 --- a/include/asm-x86/atomic_32.h +++ b/include/asm-x86/atomic_32.h @@ -15,138 +15,133 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { + int counter; +} atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t - * + * * Atomically reads the value of @v. - */ + */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value - * + * * Atomically sets the value of @v to @i. - */ -#define atomic_set(v,i) (((v)->counter) = (i)) + */ +#define atomic_set(v, i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v. */ -static __inline__ void atomic_add(int i, atomic_t *v) +static inline void atomic_add(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"+m" (v->counter) - :"ir" (i)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "+m" (v->counter) + : "ir" (i)); } /** * atomic_sub - subtract integer from atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v. */ -static __inline__ void atomic_sub(int i, atomic_t *v) +static inline void atomic_sub(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"+m" (v->counter) - :"ir" (i)); + asm volatile(LOCK_PREFIX "subl %1,%0" + : "+m" (v->counter) + : "ir" (i)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +static inline int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) + */ +static inline void atomic_inc(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"+m" (v->counter)); + asm volatile(LOCK_PREFIX "incl %0" + : "+m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) + */ +static inline void atomic_dec(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"+m" (v->counter)); + asm volatile(LOCK_PREFIX "decl %0" + : "+m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. 
- */ -static __inline__ int atomic_dec_and_test(atomic_t *v) + */ +static inline int atomic_dec_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); + asm volatile(LOCK_PREFIX "decl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); return c != 0; } /** - * atomic_inc_and_test - increment and test + * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) + */ +static inline int atomic_inc_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); + asm volatile(LOCK_PREFIX "incl %0; sete %1" + : "+m" (v->counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) * atomic_add_negative - add and test if negative * @v: pointer of type atomic_t * @i: integer value to add - * + * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. - */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) + */ +static inline int atomic_add_negative(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ int atomic_add_return(int i, atomic_t *v) +static inline int atomic_add_return(int i, atomic_t *v) { int __i; #ifdef CONFIG_M386 unsigned long flags; - if(unlikely(boot_cpu_data.x86 <= 3)) + if (unlikely(boot_cpu_data.x86 <= 3)) goto no_xadd; #endif /* Modern 486+ processor */ __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; #ifdef CONFIG_M386 @@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */ * * Atomically subtracts @i from @v and returns @v - @i */ -static __inline__ int atomic_sub_return(int i, atomic_t *v) +static inline int atomic_sub_return(int i, atomic_t *v) { - return atomic_add_return(-i,v); + return atomic_add_return(-i, v); } #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) @@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) /* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") +#define atomic_clear_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "andl %0,%1" \ + : : "r" (~(mask)), "m" (*(addr)) : "memory") -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" (mask),"m" (*(addr)) : "memory") +#define atomic_set_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "orl %0,%1" \ + : : "r" (mask), "m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() -- cgit v1.2.1 From 7edb3cd6cbadb686864b8f180232e4dc69d959e8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:42 -0700 Subject: include/asm-x86/atomic_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/atomic_64.h | 251 +++++++++++++++++++++----------------------- 1 file changed, 119 insertions(+), 132 deletions(-) diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index 2d20a7a19f62..3e0cd7d38335 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h @@ -22,140 +22,135 @@ * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ -typedef struct { int counter; } atomic_t; +typedef struct { + int counter; +} atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t - * + * * Atomically reads the value of @v. - */ + */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value - * + * * Atomically sets the value of @v to @i. - */ -#define atomic_set(v,i) (((v)->counter) = (i)) + */ +#define atomic_set(v, i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v. */ -static __inline__ void atomic_add(int i, atomic_t *v) +static inline void atomic_add(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v. 
*/ -static __inline__ void atomic_sub(int i, atomic_t *v) +static inline void atomic_sub(int i, atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "subl %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t - * + * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) +static inline int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) + */ +static inline void atomic_inc(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "incl %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) + */ +static inline void atomic_dec(atomic_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "decl %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t - * + * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) + */ +static inline int atomic_dec_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "decl %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } /** - * atomic_inc_and_test - increment and test + * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t - * + * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) + */ +static inline int atomic_inc_and_test(atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "incl %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) * atomic_add_negative - add and test if negative * @i: integer value to add * @v: pointer of type atomic_t - * + * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. 
- */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) + */ +static inline int atomic_add_negative(int i, atomic_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ int atomic_add_return(int i, atomic_t *v) +static inline int atomic_add_return(int i, atomic_t *v) { int __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; } -static __inline__ int atomic_sub_return(int i, atomic_t *v) +static inline int atomic_sub_return(int i, atomic_t *v) { - return atomic_add_return(-i,v); + return atomic_add_return(-i, v); } -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) +#define atomic_inc_return(v) (atomic_add_return(1, v)) +#define atomic_dec_return(v) (atomic_sub_return(1, v)) /* An 64bit atomic type */ -typedef struct { long counter; } atomic64_t; +typedef struct { + long counter; +} atomic64_t; #define ATOMIC64_INIT(i) { (i) } @@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t; * * Atomically sets the value of @v to @i. */ -#define atomic64_set(v,i) (((v)->counter) = (i)) +#define atomic64_set(v, i) (((v)->counter) = (i)) /** * atomic64_add - add integer to atomic64 variable @@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t; * * Atomically adds @i to @v. */ -static __inline__ void atomic64_add(long i, atomic64_t *v) +static inline void atomic64_add(long i, atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "addq %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** @@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) * * Atomically subtracts @i from @v. */ -static __inline__ void atomic64_sub(long i, atomic64_t *v) +static inline void atomic64_sub(long i, atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "subq %1,%0" - :"=m" (v->counter) - :"ir" (i), "m" (v->counter)); + asm volatile(LOCK_PREFIX "subq %1,%0" + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); } /** @@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) +static inline int atomic64_sub_and_test(long i, atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "subq %2,%0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) * * Atomically increments @v by 1. 
*/ -static __inline__ void atomic64_inc(atomic64_t *v) +static inline void atomic64_inc(atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "incq %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** @@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ -static __inline__ void atomic64_dec(atomic64_t *v) +static inline void atomic64_dec(atomic64_t *v) { - __asm__ __volatile__( - LOCK_PREFIX "decq %0" - :"=m" (v->counter) - :"m" (v->counter)); + asm volatile(LOCK_PREFIX "decq %0" + : "=m" (v->counter) + : "m" (v->counter)); } /** @@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static __inline__ int atomic64_dec_and_test(atomic64_t *v) +static inline int atomic64_dec_and_test(atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "decq %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "decq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static __inline__ int atomic64_inc_and_test(atomic64_t *v) +static inline int atomic64_inc_and_test(atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "incq %0; sete %1" - :"=m" (v->counter), "=qm" (c) - :"m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "incq %0; sete %1" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); return c != 0; } @@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. 
*/ -static __inline__ int atomic64_add_negative(long i, atomic64_t *v) +static inline int atomic64_add_negative(long i, atomic64_t *v) { unsigned char c; - __asm__ __volatile__( - LOCK_PREFIX "addq %2,%0; sets %1" - :"=m" (v->counter), "=qm" (c) - :"ir" (i), "m" (v->counter) : "memory"); + asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + : "=m" (v->counter), "=qm" (c) + : "ir" (i), "m" (v->counter) : "memory"); return c; } @@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static __inline__ long atomic64_add_return(long i, atomic64_t *v) +static inline long atomic64_add_return(long i, atomic64_t *v) { long __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddq %0, %1;" - :"+r" (i), "+m" (v->counter) - : : "memory"); + asm volatile(LOCK_PREFIX "xaddq %0, %1;" + : "+r" (i), "+m" (v->counter) + : : "memory"); return i + __i; } -static __inline__ long atomic64_sub_return(long i, atomic64_t *v) +static inline long atomic64_sub_return(long i, atomic64_t *v) { - return atomic64_add_return(-i,v); + return atomic64_add_return(-i, v); } -#define atomic64_inc_return(v) (atomic64_add_return(1,v)) -#define atomic64_dec_return(v) (atomic64_sub_return(1,v)) +#define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +#define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) -#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) /** * atomic_add_unless - add unless the number is a given value @@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { long c, old; c = atomic64_read(v); @@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) /* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") - -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") +#define atomic_clear_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "andl %0,%1" \ + : : "r" (~(mask)), "m" (*(addr)) : "memory") + +#define atomic_set_mask(mask, addr) \ + asm volatile(LOCK_PREFIX "orl %0,%1" \ + : : "r" ((unsigned)(mask)), "m" (*(addr)) \ + : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() -- cgit v1.2.1 From fd591acd0f0b2382e8c0a839be9974e7c019bdb6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:43 -0700 Subject: include/asm-x86/bitops_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops_32.h | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index e4d75fcf9c03..2513a81f82aa 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) if (!size) return 0; - /* This looks at memory. Mark it volatile to tell gcc not to move it around */ - __asm__ __volatile__( - "movl $-1,%%eax\n\t" - "xorl %%edx,%%edx\n\t" - "repe; scasl\n\t" - "je 1f\n\t" - "xorl -4(%%edi),%%eax\n\t" - "subl $4,%%edi\n\t" - "bsfl %%eax,%%edx\n" - "1:\tsubl %%ebx,%%edi\n\t" - "shll $3,%%edi\n\t" - "addl %%edi,%%edx" - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); + /* This looks at memory. 
+ * Mark it volatile to tell gcc not to move it around + */ + asm volatile("movl $-1,%%eax\n\t" + "xorl %%edx,%%edx\n\t" + "repe; scasl\n\t" + "je 1f\n\t" + "xorl -4(%%edi),%%eax\n\t" + "subl $4,%%edi\n\t" + "bsfl %%eax,%%edx\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%edx" + : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) + : "1" ((size + 31) >> 5), "2" (addr), + "b" (addr) : "memory"); return res; } @@ -75,7 +77,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) unsigned long val = *addr++; if (val) return __ffs(val) + x; - x += (sizeof(*addr)<<3); + x += sizeof(*addr) << 3; } return x; } @@ -152,10 +154,10 @@ static inline int fls(int x) #include -#define ext2_set_bit_atomic(lock, nr, addr) \ - test_and_set_bit((nr), (unsigned long *)addr) -#define ext2_clear_bit_atomic(lock, nr, addr) \ - test_and_clear_bit((nr), (unsigned long *)addr) +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)(addr)) #include -- cgit v1.2.1 From 49f74946f008add0b22723244976a32b365de06f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:44 -0700 Subject: include/asm-x86/bitops_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops_64.h | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index aaf15194d536..365f8207ea59 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -17,35 +17,35 @@ static inline long __scanbit(unsigned long val, unsigned long max) return val; } -#define find_first_bit(addr,size) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - (__scanbit(*(unsigned long *)addr,(size))) : \ - find_first_bit(addr,size))) - #define find_next_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ find_next_bit(addr,size,off))) -#define find_first_zero_bit(addr,size) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - (__scanbit(~*(unsigned long *)addr,(size))) : \ - find_first_zero_bit(addr,size))) - #define find_next_zero_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ find_next_zero_bit(addr,size,off))) -static inline void set_bit_string(unsigned long *bitmap, unsigned long i, - int len) -{ - unsigned long end = i + len; +#define find_first_bit(addr, size) \ + ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ + ? (__scanbit(*(unsigned long *)(addr), (size))) \ + : find_first_bit((addr), (size)))) + +#define find_first_zero_bit(addr, size) \ + ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ + ? (__scanbit(~*(unsigned long *)(addr), (size))) \ + : find_first_zero_bit((addr), (size)))) + +static inline void set_bit_string(unsigned long *bitmap, unsigned long i, + int len) +{ + unsigned long end = i + len; while (i < end) { - __set_bit(i, bitmap); + __set_bit(i, bitmap); i++; } -} +} /** * ffz - find first zero in word. 
@@ -150,10 +150,10 @@ static inline int fls(int x) #include -#define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit_atomic(lock,nr,addr) \ - test_and_clear_bit((nr),(unsigned long*)addr) +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)(addr)) #include -- cgit v1.2.1 From 286275c90f148562b973b1e1f39f9689e6676dc4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:45 -0700 Subject: include/asm-x86/bitops.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bitops.h | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 7a76555b676a..1ae7b270a1ef 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -23,13 +23,13 @@ #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) /* Technically wrong, but this avoids compilation errors on some gcc versions. */ -#define ADDR "=m" (*(volatile long *) addr) -#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5]) +#define ADDR "=m" (*(volatile long *)addr) +#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5]) #else #define ADDR "+m" (*(volatile long *) addr) -#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5]) +#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5]) #endif -#define BASE_ADDR "m" (*(volatile int *) addr) +#define BASE_ADDR "m" (*(volatile int *)addr) /** * set_bit - Atomically set a bit in memory @@ -48,9 +48,7 @@ */ static inline void set_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "bts %1,%0" - : ADDR - : "Ir" (nr) : "memory"); + asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory"); } /** @@ -82,8 +80,7 @@ static inline void __set_bit(int nr, volatile void *addr) */ static inline void clear_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btr %1,%2" - : BIT_ADDR : "Ir" (nr), BASE_ADDR); + asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /* @@ -151,8 +148,7 @@ static inline void __change_bit(int nr, volatile void *addr) */ static inline void change_bit(int nr, volatile void *addr) { - asm volatile(LOCK_PREFIX "btc %1,%2" - : BIT_ADDR : "Ir" (nr), BASE_ADDR); + asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); } /** @@ -168,9 +164,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) int oldbit; asm volatile(LOCK_PREFIX "bts %2,%1\n\t" - "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } @@ -202,8 +196,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) asm volatile("bts %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -221,8 +214,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) asm volatile(LOCK_PREFIX "btr %2,%1\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } @@ -242,8 +234,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) asm volatile("btr %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return 
oldbit; } @@ -254,8 +245,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) asm volatile("btc %2,%3\n\t" "sbb %0,%0" - : "=r" (oldbit), BIT_ADDR - : "Ir" (nr), BASE_ADDR); + : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); return oldbit; } @@ -274,8 +264,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) asm volatile(LOCK_PREFIX "btc %2,%1\n\t" "sbb %0,%0" - : "=r" (oldbit), ADDR - : "Ir" (nr) : "memory"); + : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); return oldbit; } -- cgit v1.2.1 From 86d8a08616ecbc510323bfca591816a5709c6e54 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:46 -0700 Subject: include/asm-x86/bug.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/bug.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index 8d477a201392..b69aa64b82a4 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h @@ -12,25 +12,25 @@ # define __BUG_C0 "2:\t.quad 1b, %c0\n" #endif -#define BUG() \ - do { \ - asm volatile("1:\tud2\n" \ - ".pushsection __bug_table,\"a\"\n" \ - __BUG_C0 \ - "\t.word %c1, 0\n" \ - "\t.org 2b+%c2\n" \ - ".popsection" \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (sizeof(struct bug_entry))); \ - for(;;) ; \ - } while(0) +#define BUG() \ +do { \ + asm volatile("1:\tud2\n" \ + ".pushsection __bug_table,\"a\"\n" \ + __BUG_C0 \ + "\t.word %c1, 0\n" \ + "\t.org 2b+%c2\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (sizeof(struct bug_entry))); \ + for (;;) ; \ +} while (0) #else -#define BUG() \ - do { \ - asm volatile("ud2"); \ - for(;;) ; \ - } while(0) +#define BUG() \ +do { \ + asm volatile("ud2"); \ + for (;;) ; \ +} while (0) #endif #endif /* !CONFIG_BUG */ -- cgit v1.2.1 From 346050952cac11b25a98c7e1743412b416827314 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:47 -0700 Subject: include/asm-x86/byteorder.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/byteorder.h | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index fe2f2e5d51ba..e02ae2d89acf 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h @@ -8,50 +8,59 @@ #ifdef __i386__ -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) { #ifdef CONFIG_X86_BSWAP - __asm__("bswap %0" : "=r" (x) : "0" (x)); + asm("bswap %0" : "=r" (x) : "0" (x)); #else - __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ - "rorl $16,%0\n\t" /* swap words */ - "xchgb %b0,%h0" /* swap higher bytes */ - :"=q" (x) - : "0" (x)); + asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ + "rorl $16,%0\n\t" /* swap words */ + "xchgb %b0,%h0" /* swap higher bytes */ + : "=q" (x) + : "0" (x)); #endif return x; } -static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) +static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) { union { - struct { __u32 a,b; } s; + struct { + __u32 a; + __u32 b; + } s; __u64 u; } v; v.u = val; #ifdef CONFIG_X86_BSWAP - __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1" + asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #else v.s.a = ___arch__swab32(v.s.a); v.s.b = ___arch__swab32(v.s.b); - 
__asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); + asm("xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); #endif return v.u; } #else /* __i386__ */ -static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) +static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) { - __asm__("bswapq %0" : "=r" (x) : "0" (x)); + asm("bswapq %0" + : "=r" (x) + : "0" (x)); return x; } -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) { - __asm__("bswapl %0" : "=r" (x) : "0" (x)); + asm("bswapl %0" + : "=r" (x) + : "0" (x)); return x; } -- cgit v1.2.1 From 3f61b19a9f60a0469a19113be8bb3b3623006de2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:48 -0700 Subject: include/asm-x86/cacheflush.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/cacheflush.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 90437d3f7610..7ab5b520b7bd 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -14,18 +14,18 @@ #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma,pg) do { } while (0) -#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_icache_page(vma, pg) do { } while (0) +#define flush_icache_user_range(vma, pg, adr, len) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + memcpy((dst), (src), (len)) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy((dst), (src), (len)) int __deprecated_for_modules change_page_attr(struct page *page, int numpages, - pgprot_t prot); + pgprot_t prot); int set_pages_uc(struct page *page, int numpages); int set_pages_wb(struct page *page, int numpages); -- cgit v1.2.1 From 0883e91ae209f4ada4db9b383026df77351c1320 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:49 -0700 Subject: include/asm-x86/checksum_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/checksum_32.h | 152 +++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 77 deletions(-) diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h index 75194abbe8ee..52bbb0d8c4c1 100644 --- a/include/asm-x86/checksum_32.h +++ b/include/asm-x86/checksum_32.h @@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); */ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone @@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, * If you use these functions directly please don't forget the * access_ok(). 
*/ -static __inline__ -__wsum csum_partial_copy_nocheck (const void *src, void *dst, - int len, __wsum sum) +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum) { - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); } -static __inline__ -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) +static inline __wsum csum_partial_copy_from_user(const void __user *src, + void *dst, + int len, __wsum sum, + int *err_ptr) { might_sleep(); return csum_partial_copy_generic((__force void *)src, dst, - len, sum, err_ptr, NULL); + len, sum, err_ptr, NULL); } /* @@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; - __asm__ __volatile__( - "movl (%1), %0 ;\n" - "subl $4, %2 ;\n" - "jbe 2f ;\n" - "addl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" -"1: adcl 16(%1), %0 ;\n" - "lea 4(%1), %1 ;\n" - "decl %2 ;\n" - "jne 1b ;\n" - "adcl $0, %0 ;\n" - "movl %0, %2 ;\n" - "shrl $16, %0 ;\n" - "addw %w2, %w0 ;\n" - "adcl $0, %0 ;\n" - "notl %0 ;\n" -"2: ;\n" + asm volatile("movl (%1), %0 ;\n" + "subl $4, %2 ;\n" + "jbe 2f ;\n" + "addl 4(%1), %0 ;\n" + "adcl 8(%1), %0 ;\n" + "adcl 12(%1), %0;\n" + "1: adcl 16(%1), %0 ;\n" + "lea 4(%1), %1 ;\n" + "decl %2 ;\n" + "jne 1b ;\n" + "adcl $0, %0 ;\n" + "movl %0, %2 ;\n" + "shrl $16, %0 ;\n" + "addw %w2, %w0 ;\n" + "adcl $0, %0 ;\n" + "notl %0 ;\n" + "2: ;\n" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl) - : "1" (iph), "2" (ihl) - : "memory"); + : "=r" (sum), "=r" (iph), "=r" (ihl) + : "1" (iph), "2" (ihl) + : "memory"); return (__force __sum16)sum; } @@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) static inline __sum16 csum_fold(__wsum sum) { - __asm__( - "addl %1, %0 ;\n" - "adcl $0xffff, %0 ;\n" - : "=r" (sum) - : "r" ((__force u32)sum << 16), - "0" ((__force u32)sum & 0xffff0000) - ); + asm("addl %1, %0 ;\n" + "adcl $0xffff, %0 ;\n" + : "=r" (sum) + : "r" ((__force u32)sum << 16), + "0" ((__force u32)sum & 0xffff0000)); return (__force __sum16)(~(__force u32)sum >> 16); } static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { - __asm__( - "addl %1, %0 ;\n" - "adcl %2, %0 ;\n" - "adcl %3, %0 ;\n" - "adcl $0, %0 ;\n" - : "=r" (sum) - : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); - return sum; + asm("addl %1, %0 ;\n" + "adcl %2, %0 ;\n" + "adcl %3, %0 ;\n" + "adcl $0, %0 ;\n" + : "=r" (sum) + : "g" (daddr), "g"(saddr), + "g" ((len + proto) << 8), "0" (sum)); + return sum; } /* @@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } /* @@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, static inline __sum16 
ip_compute_csum(const void *buff, int len) { - return csum_fold (csum_partial(buff, len, 0)); + return csum_fold(csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) { - __asm__( - "addl 0(%1), %0 ;\n" - "adcl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" - "adcl 0(%2), %0 ;\n" - "adcl 4(%2), %0 ;\n" - "adcl 8(%2), %0 ;\n" - "adcl 12(%2), %0 ;\n" - "adcl %3, %0 ;\n" - "adcl %4, %0 ;\n" - "adcl $0, %0 ;\n" - : "=&r" (sum) - : "r" (saddr), "r" (daddr), - "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); + asm("addl 0(%1), %0 ;\n" + "adcl 4(%1), %0 ;\n" + "adcl 8(%1), %0 ;\n" + "adcl 12(%1), %0 ;\n" + "adcl 0(%2), %0 ;\n" + "adcl 4(%2), %0 ;\n" + "adcl 8(%2), %0 ;\n" + "adcl 12(%2), %0 ;\n" + "adcl %3, %0 ;\n" + "adcl %4, %0 ;\n" + "adcl $0, %0 ;\n" + : "=&r" (sum) + : "r" (saddr), "r" (daddr), + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); return csum_fold(sum); } @@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) +static inline __wsum csum_and_copy_to_user(const void *src, + void __user *dst, + int len, __wsum sum, + int *err_ptr) { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); + return csum_partial_copy_generic(src, (__force void *)dst, + len, sum, NULL, err_ptr); if (len) *err_ptr = -EFAULT; -- cgit v1.2.1 From 3d3c6e10036dcbbe9fe7d69911f5638faecfbaeb Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:50 -0700 Subject: include/asm-x86/checksum_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/checksum_64.h | 118 ++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 61 deletions(-) diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index e5f79997decc..8bd861cc5267 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h @@ -1,33 +1,31 @@ #ifndef _X86_64_CHECKSUM_H #define _X86_64_CHECKSUM_H -/* - * Checksums for x86-64 - * Copyright 2002 by Andi Kleen, SuSE Labs +/* + * Checksums for x86-64 + * Copyright 2002 by Andi Kleen, SuSE Labs * with some code from asm-x86/checksum.h - */ + */ #include #include #include -/** +/** * csum_fold - Fold and invert a 32bit checksum. * sum: 32bit unfolded sum - * + * * Fold a 32bit running checksum to 16bit and invert it. This is usually * the last step before putting a checksum into a packet. * Make sure not to mix with 64bit checksums. */ static inline __sum16 csum_fold(__wsum sum) { - __asm__( - " addl %1,%0\n" - " adcl $0xffff,%0" - : "=r" (sum) - : "r" ((__force u32)sum << 16), - "0" ((__force u32)sum & 0xffff0000) - ); + asm(" addl %1,%0\n" + " adcl $0xffff,%0" + : "=r" (sum) + : "r" ((__force u32)sum << 16), + "0" ((__force u32)sum & 0xffff0000)); return (__force __sum16)(~(__force u32)sum >> 16); } @@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum) * ip_fast_csum - Compute the IPv4 header checksum efficiently. 
* iph: ipv4 header * ihl: length of header / 4 - */ + */ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum; - asm( " movl (%1), %0\n" - " subl $4, %2\n" - " jbe 2f\n" - " addl 4(%1), %0\n" - " adcl 8(%1), %0\n" - " adcl 12(%1), %0\n" - "1: adcl 16(%1), %0\n" - " lea 4(%1), %1\n" - " decl %2\n" - " jne 1b\n" - " adcl $0, %0\n" - " movl %0, %2\n" - " shrl $16, %0\n" - " addw %w2, %w0\n" - " adcl $0, %0\n" - " notl %0\n" - "2:" + asm(" movl (%1), %0\n" + " subl $4, %2\n" + " jbe 2f\n" + " addl 4(%1), %0\n" + " adcl 8(%1), %0\n" + " adcl 12(%1), %0\n" + "1: adcl 16(%1), %0\n" + " lea 4(%1), %1\n" + " decl %2\n" + " jne 1b\n" + " adcl $0, %0\n" + " movl %0, %2\n" + " shrl $16, %0\n" + " addw %w2, %w0\n" + " adcl $0, %0\n" + " notl %0\n" + "2:" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl) - : "1" (iph), "2" (ihl) - : "memory"); + : "=r" (sum), "=r" (iph), "=r" (ihl) + : "1" (iph), "2" (ihl) + : "memory"); return (__force __sum16)sum; } -/** +/** * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet - * @sum: initial sum to be added in (32bit unfolded) - * - * Returns the pseudo header checksum the input data. Result is + * @sum: initial sum to be added in (32bit unfolded) + * + * Returns the pseudo header checksum the input data. Result is * 32bit unfolded. */ static inline __wsum @@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, " adcl %2, %0\n" " adcl %3, %0\n" " adcl $0, %0\n" - : "=r" (sum) + : "=r" (sum) : "g" (daddr), "g" (saddr), "g" ((len + proto)<<8), "0" (sum)); - return sum; + return sum; } -/** +/** * csum_tcpup_magic - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet - * @sum: initial sum to be added in (32bit unfolded) - * + * @sum: initial sum to be added in (32bit unfolded) + * * Returns the 16bit pseudo header checksum the input data already * complemented and ready to be filled in. */ -static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, __wsum sum) +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } -/** +/** * csum_partial - Compute an internet checksum. * @buff: buffer to be checksummed * @len: length of buffer. @@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, * Returns the 32bit unfolded internet checksum of the buffer. * Before filling it in it needs to be csum_fold()'ed. * buff should be aligned to a 64bit boundary if possible. - */ + */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 @@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); /* Do not call this directly. 
Use the wrappers below */ extern __wsum csum_partial_copy_generic(const void *src, const void *dst, - int len, - __wsum sum, - int *src_err_ptr, int *dst_err_ptr); + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum isum, int *errp); + int len, __wsum isum, int *errp); extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, - int len, __wsum isum, int *errp); -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, - __wsum sum); + int len, __wsum isum, int *errp); +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); /* Old names. To be removed. */ #define csum_and_copy_to_user csum_partial_copy_to_user #define csum_and_copy_from_user csum_partial_copy_from_user -/** +/** * ip_compute_csum - Compute an 16bit IP checksum. * @buff: buffer address. * @len: length of buffer. @@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len); * @proto: protocol of packet * @sum: initial sum (32bit unfolded) to be added in * - * Computes an IPv6 pseudo header checksum. This sum is added the checksum + * Computes an IPv6 pseudo header checksum. This sum is added the checksum * into UDP/TCP packets and contains some link layer information. * Returns the unfolded 32bit checksum. */ @@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, static inline unsigned add32_with_carry(unsigned a, unsigned b) { asm("addl %2,%0\n\t" - "adcl $0,%0" - : "=r" (a) + "adcl $0,%0" + : "=r" (a) : "0" (a), "r" (b)); return a; } #endif - -- cgit v1.2.1 From 8121019cad7bfe61f8f626a85427aca66dfe0f1e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:51 -0700 Subject: include/asm-x86/cmpxchg_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/cmpxchg_32.h | 253 ++++++++++++++++++++++--------------------- 1 file changed, 132 insertions(+), 121 deletions(-) diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index 959fad00dff5..bf5a69d1329e 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h @@ -8,9 +8,12 @@ * you need to test for the feature in boot_cpu_data. */ -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) +#define xchg(ptr, v) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) -struct __xchg_dummy { unsigned long a[100]; }; +struct __xchg_dummy { + unsigned long a[100]; +}; #define __xg(x) ((struct __xchg_dummy *)(x)) /* @@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; }; * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. 
*/ -static inline void __set_64bit (unsigned long long * ptr, - unsigned int low, unsigned int high) +static inline void __set_64bit(unsigned long long *ptr, + unsigned int low, unsigned int high) { - __asm__ __volatile__ ( - "\n1:\t" - "movl (%0), %%eax\n\t" - "movl 4(%0), %%edx\n\t" - LOCK_PREFIX "cmpxchg8b (%0)\n\t" - "jnz 1b" - : /* no outputs */ - : "D"(ptr), - "b"(low), - "c"(high) - : "ax","dx","memory"); + asm volatile("\n1:\t" + "movl (%0), %%eax\n\t" + "movl 4(%0), %%edx\n\t" + LOCK_PREFIX "cmpxchg8b (%0)\n\t" + "jnz 1b" + : /* no outputs */ + : "D"(ptr), + "b"(low), + "c"(high) + : "ax", "dx", "memory"); } -static inline void __set_64bit_constant (unsigned long long *ptr, - unsigned long long value) +static inline void __set_64bit_constant(unsigned long long *ptr, + unsigned long long value) { - __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); + __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); } -#define ll_low(x) *(((unsigned int*)&(x))+0) -#define ll_high(x) *(((unsigned int*)&(x))+1) -static inline void __set_64bit_var (unsigned long long *ptr, - unsigned long long value) +#define ll_low(x) *(((unsigned int *)&(x)) + 0) +#define ll_high(x) *(((unsigned int *)&(x)) + 1) + +static inline void __set_64bit_var(unsigned long long *ptr, + unsigned long long value) { - __set_64bit(ptr,ll_low(value), ll_high(value)); + __set_64bit(ptr, ll_low(value), ll_high(value)); } -#define set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit_constant(ptr, value) : \ - __set_64bit_var(ptr, value) ) +#define set_64bit(ptr, value) \ + (__builtin_constant_p((value)) \ + ? __set_64bit_constant((ptr), (value)) \ + : __set_64bit_var((ptr), (value))) -#define _set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ - __set_64bit(ptr, ll_low(value), ll_high(value)) ) +#define _set_64bit(ptr, value) \ + (__builtin_constant_p(value) \ + ? __set_64bit(ptr, (unsigned int)(value), \ + (unsigned int)((value) >> 32)) \ + : __set_64bit(ptr, ll_low((value)), ll_high((value)))) /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. 
--ANK */ -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) { switch (size) { - case 1: - __asm__ __volatile__("xchgb %b0,%1" - :"=q" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 2: - __asm__ __volatile__("xchgw %w0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 4: - __asm__ __volatile__("xchgl %0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; + case 1: + asm volatile("xchgb %b0,%1" + : "=q" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 2: + asm volatile("xchgw %w0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 4: + asm volatile("xchgl %0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; } return x; } @@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define sync_cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) +#define sync_cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) #endif #ifdef CONFIG_X86_CMPXCHG64 -#define cmpxchg64(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ - (unsigned long long)(n))) -#define cmpxchg64_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ - (unsigned long long)(n))) +#define cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) +#define cmpxchg64_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ + (unsigned long long)(n))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, @@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long prev; switch (size) { case 1: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; @@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, * 
isn't. */ static inline unsigned long __sync_cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("lock; cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__("lock; cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("lock; cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__("cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgl %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long long __cmpxchg64(volatile void *ptr, - unsigned long long old, unsigned long long new) + unsigned long long old, + unsigned long long new) { unsigned long long prev; - __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); return prev; } static inline unsigned long long __cmpxchg64_local(volatile void *ptr, - unsigned long long old, unsigned long long new) + unsigned long long old, + unsigned long long new) { unsigned long long prev; - __asm__ __volatile__("cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); + asm volatile("cmpxchg8b %3" + : "=A"(prev) + : "b"((unsigned long)new), + "c"((unsigned long)(new >> 32)), + "m"(*__xg(ptr)), + "0"(old) + : "memory"); return prev; } @@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, - unsigned long new, int size) + unsigned long new, int size) { switch (size) { case 1: -- cgit v1.2.1 From e52da357a15db9e12b96b4e40dffe6b9e54bb976 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:52 -0700 Subject: include/asm-x86/cmpxchg_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: 
Ingo Molnar --- include/asm-x86/cmpxchg_64.h | 134 ++++++++++++++++++++++--------------------- 1 file changed, 69 insertions(+), 65 deletions(-) diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h index 56f5b41e071c..d9b26b9a28cf 100644 --- a/include/asm-x86/cmpxchg_64.h +++ b/include/asm-x86/cmpxchg_64.h @@ -3,7 +3,8 @@ #include /* Provides LOCK_PREFIX */ -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) +#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ + (ptr), sizeof(*(ptr)))) #define __xg(x) ((volatile long *)(x)) @@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) { switch (size) { - case 1: - __asm__ __volatile__("xchgb %b0,%1" - :"=q" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 2: - __asm__ __volatile__("xchgw %w0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 4: - __asm__ __volatile__("xchgl %k0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 8: - __asm__ __volatile__("xchgq %0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; + case 1: + asm volatile("xchgb %b0,%1" + : "=q" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 2: + asm volatile("xchgw %w0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 4: + asm volatile("xchgl %k0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; + case 8: + asm volatile("xchgq %0,%1" + : "=r" (x) + : "m" (*__xg(ptr)), "0" (x) + : "memory"); + break; } return x; } @@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long prev; switch (size) { case 1: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 2: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 8: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; } static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, unsigned long new, int size) + unsigned long old, + unsigned long new, int size) { unsigned long prev; switch (size) { case 1: - __asm__ __volatile__("cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgb %b1,%2" + : "=a"(prev) + : "q"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; 
case 2: - __asm__ __volatile__("cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgw %w1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 4: - __asm__ __volatile__("cmpxchgl %k1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgl %k1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; case 8: - __asm__ __volatile__("cmpxchgq %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); + asm volatile("cmpxchgq %1,%2" + : "=a"(prev) + : "r"(new), "m"(*__xg(ptr)), "0"(old) + : "memory"); return prev; } return old; @@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #define cmpxchg(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) + (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64(ptr, o, n) \ - ({ \ +({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg((ptr), (o), (n)); \ - }) +}) #define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) + (unsigned long)(n), \ + sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) \ - ({ \ +({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ - }) +}) #endif -- cgit v1.2.1 From c96a6d41aa033c03b43e5a09448bddcc971797b1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:53 -0700 Subject: include/asm-x86/compat.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/compat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index d3e8f3e87ee8..1793ac317a30 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h @@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static __inline__ void __user *compat_alloc_user_space(long len) +static inline void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->sp - len; -- cgit v1.2.1 From 83ecfdd0a0a065e55172889dc660c5db3c14633a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:55 -0700 Subject: include/asm-x86/current_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/current_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h index d35248539912..5af9bdb97a16 100644 --- a/include/asm-x86/current_32.h +++ b/include/asm-x86/current_32.h @@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void) { return x86_read_percpu(current_task); } - + #define current get_current() #endif /* !(_I386_CURRENT_H) */ -- cgit v1.2.1 From 2cade899df7d9cc9e977a24898b0bd5a6f4a8dfd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:56 -0700 Subject: include/asm-x86/current_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/current_64.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h index bc8adecee66d..2d368ede2fc1 100644 --- a/include/asm-x86/current_64.h +++ b/include/asm-x86/current_64.h @@ -1,23 +1,23 
@@ #ifndef _X86_64_CURRENT_H #define _X86_64_CURRENT_H -#if !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLY__) struct task_struct; #include -static inline struct task_struct *get_current(void) -{ - struct task_struct *t = read_pda(pcurrent); +static inline struct task_struct *get_current(void) +{ + struct task_struct *t = read_pda(pcurrent); return t; -} +} #define current get_current() #else #ifndef ASM_OFFSET_H -#include +#include #endif #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg -- cgit v1.2.1 From b77619001e3acd228209bfdd864734cfdddb3d92 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:57 -0700 Subject: include/asm-x86/desc_defs.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/desc_defs.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index e33f078b3e54..eccb4ea1f918 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h @@ -18,17 +18,19 @@ * incrementally. We keep the signature as a struct, rather than an union, * so we can get rid of it transparently in the future -- glommer */ -// 8 byte segment descriptor +/* 8 byte segment descriptor */ struct desc_struct { union { - struct { unsigned int a, b; }; + struct { + unsigned int a; + unsigned int b; + }; struct { u16 limit0; u16 base0; unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; }; - }; } __attribute__((packed)); @@ -39,7 +41,7 @@ enum { GATE_TASK = 0x5, }; -// 16byte gate +/* 16byte gate */ struct gate_struct64 { u16 offset_low; u16 segment; @@ -56,10 +58,10 @@ struct gate_struct64 { enum { DESC_TSS = 0x9, DESC_LDT = 0x2, - DESCTYPE_S = 0x10, /* !system */ + DESCTYPE_S = 0x10, /* !system */ }; -// LDT or TSS descriptor in the GDT. 16 bytes. +/* LDT or TSS descriptor in the GDT. 16 bytes. 
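[Editor's note: the split limit0/base0/base1/base2 fields in struct desc_struct above mirror the hardware descriptor layout, so recovering a flat base or limit means reassembling bytes and nibbles. A small sketch of that reassembly, equivalent in spirit to get_desc_base()/get_desc_limit() in <asm-x86/desc.h>; the helper names here are mine:]

/* Reassemble the scattered base/limit fields of a segment descriptor. */
static unsigned long desc_base(const struct desc_struct *d)
{
	return d->base0 | (d->base1 << 16) | (d->base2 << 24);
}

static unsigned long desc_limit(const struct desc_struct *d)
{
	return d->limit0 | (d->limit << 16);	/* raw 20-bit limit */
}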
*/ struct ldttss_desc64 { u16 limit0; u16 base0; @@ -84,7 +86,6 @@ struct desc_ptr { unsigned long address; } __attribute__((packed)) ; - #endif /* !__ASSEMBLY__ */ #endif -- cgit v1.2.1 From c1773a167defdec919b1dbfc9100054c44fd0ff5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:58 -0700 Subject: include/asm-x86/desc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/desc.h | 61 +++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 5b6a05d3a771..268a012bcd79 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h @@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) } static inline void pack_gate(gate_desc *gate, unsigned char type, - unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) - + unsigned long base, unsigned dpl, unsigned flags, + unsigned short seg) { gate->a = (seg << 16) | (base & 0xffff); gate->b = (base & 0xffff0000) | @@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr) #define load_TR_desc() native_load_tr_desc() #define load_gdt(dtr) native_load_gdt(dtr) #define load_idt(dtr) native_load_idt(dtr) -#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) -#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) +#define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) +#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) #define store_idt(dtr) native_store_idt(dtr) #define store_tr(tr) (tr = native_store_tr()) -#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) +#define store_ldt(ldt) asm("sldt %0":"=m" (ldt)) #define load_TLS(t, cpu) native_load_tls(t, cpu) #define set_ldt native_set_ldt -#define write_ldt_entry(dt, entry, desc) \ - native_write_ldt_entry(dt, entry, desc) -#define write_gdt_entry(dt, entry, desc, type) \ - native_write_gdt_entry(dt, entry, desc, type) -#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) +#define write_ldt_entry(dt, entry, desc) \ + native_write_ldt_entry(dt, entry, desc) +#define write_gdt_entry(dt, entry, desc, type) \ + native_write_gdt_entry(dt, entry, desc, type) +#define write_idt_entry(dt, entry, g) \ + native_write_idt_entry(dt, entry, g) #endif static inline void native_write_idt_entry(gate_desc *idt, int entry, @@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, { desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | - (limit & 0x000f0000) | ((type & 0xff) << 8) | - ((flags & 0xf) << 20); + (limit & 0x000f0000) | ((type & 0xff) << 8) | + ((flags & 0xf) << 20); desc->p = 1; } @@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; desc->base3 = PTR_HIGH(addr); #else - pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); #endif } @@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) * last valid byte */ set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, - IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); + IO_BITMAP_OFFSET + IO_BITMAP_BYTES + + sizeof(unsigned long) - 1); write_gdt_entry(d, entry, &tss, DESC_TSS); } @@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void 
*addr) static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) - __asm__ __volatile__("lldt %w0"::"q" (0)); + asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; @@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) DESC_LDT, entries * sizeof(ldt) - 1); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); - __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); + asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } } @@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } -#define _LDT_empty(info) (\ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0) +#define _LDT_empty(info) \ + ((info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ + (info)->read_exec_only == 1 && \ + (info)->seg_32bit == 0 && \ + (info)->limit_in_pages == 0 && \ + (info)->seg_not_present == 1 && \ + (info)->useable == 0) #ifdef CONFIG_X86_64 #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) @@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc) } static inline void _set_gate(int gate, unsigned type, void *addr, - unsigned dpl, unsigned ist, unsigned seg) + unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); @@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist) * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. */ #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ - movb idx*8+4(gdt), lo_b; \ - movb idx*8+7(gdt), hi_b; \ - shll $16, base; \ - movw idx*8+2(gdt), lo_w; + movb idx * 8 + 4(gdt), lo_b; \ + movb idx * 8 + 7(gdt), hi_b; \ + shll $16, base; \ + movw idx * 8 + 2(gdt), lo_w; #endif /* __ASSEMBLY__ */ -- cgit v1.2.1 From 925a09b27fc73593eb705e50e6db86c4a22091db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:01:59 -0700 Subject: include/asm-x86/div64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/div64.h | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h index e98d16e7a37a..0dbf8bf3ef0a 100644 --- a/include/asm-x86/div64.h +++ b/include/asm-x86/div64.h @@ -17,18 +17,20 @@ * This ends up being the most efficient "calling * convention" on x86. 
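[Editor's note: do_div() is easy to misuse -- it divides its 64-bit dividend *in place* and the macro expression evaluates to the remainder, rather than behaving like a pure function. A hedged sketch of that contract, using a portable plain-C stand-in for the x86 asm version below:]

#include <stdio.h>

/* Portable stand-in with the same contract as the asm do_div() below:
 * 'n' is replaced by the quotient, the expression yields the remainder. */
#define do_div_sketch(n, base)					\
({								\
	unsigned long __rem = (unsigned long)((n) % (base));	\
	(n) /= (base);						\
	__rem;							\
})

int main(void)
{
	unsigned long long ns = 1234567891234ULL;
	unsigned long rem = do_div_sketch(ns, 1000000000UL);

	printf("%llu s, %lu ns\n", ns, rem);	/* 1234 s, 567891234 ns */
	return 0;
}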
*/ -#define do_div(n,base) ({ \ - unsigned long __upper, __low, __high, __mod, __base; \ - __base = (base); \ - asm("":"=a" (__low), "=d" (__high):"A" (n)); \ - __upper = __high; \ - if (__high) { \ - __upper = __high % (__base); \ - __high = __high / (__base); \ - } \ - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ - asm("":"=A" (n):"a" (__low),"d" (__high)); \ - __mod; \ +#define do_div(n, base) \ +({ \ + unsigned long __upper, __low, __high, __mod, __base; \ + __base = (base); \ + asm("":"=a" (__low), "=d" (__high) : "A" (n)); \ + __upper = __high; \ + if (__high) { \ + __upper = __high % (__base); \ + __high = __high / (__base); \ + } \ + asm("divl %2":"=a" (__low), "=d" (__mod) \ + : "rm" (__base), "0" (__low), "1" (__upper)); \ + asm("":"=A" (n) : "a" (__low), "d" (__high)); \ + __mod; \ }) /* @@ -37,14 +39,13 @@ * * Warning, this will do an exception if X overflows. */ -#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) +#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c) -static inline long -div_ll_X_l_rem(long long divs, long div, long *rem) +static inline long div_ll_X_l_rem(long long divs, long div, long *rem) { long dum2; - __asm__("divl %2":"=a"(dum2), "=d"(*rem) - : "rm"(div), "A"(divs)); + asm("divl %2":"=a"(dum2), "=d"(*rem) + : "rm"(div), "A"(divs)); return dum2; -- cgit v1.2.1 From 113cbebac7c88ce78a48801e159108ce6c9d1fb3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:00 -0700 Subject: include/asm-x86/dma.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/dma.h | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index e9733ce89880..ca1098a7e580 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h @@ -12,7 +12,6 @@ #include /* need byte IO */ #include - #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER #define dma_outb outb_p #else @@ -74,15 +73,15 @@ #ifdef CONFIG_X86_32 /* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) +#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) #else /* 16MB ISA DMA zone */ -#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) +#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ -#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) +#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) /* Compat define for old dma zone */ #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) @@ -154,20 +153,20 @@ extern spinlock_t dma_spin_lock; -static __inline__ unsigned long claim_dma_lock(void) +static inline unsigned long claim_dma_lock(void) { unsigned long flags; spin_lock_irqsave(&dma_spin_lock, flags); return flags; } -static __inline__ void release_dma_lock(unsigned long flags) +static inline void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); } /* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) +static inline void enable_dma(unsigned int dmanr) { if (dmanr <= 3) dma_outb(dmanr, DMA1_MASK_REG); @@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr) dma_outb(dmanr & 3, DMA2_MASK_REG); } -static __inline__ void disable_dma(unsigned int dmanr) +static inline void disable_dma(unsigned int dmanr) { if (dmanr <= 3) dma_outb(dmanr | 4, 
DMA1_MASK_REG); @@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr) * --- In order to do that, the DMA routines below should --- * --- only be used while holding the DMA lock ! --- */ -static __inline__ void clear_dma_ff(unsigned int dmanr) +static inline void clear_dma_ff(unsigned int dmanr) { if (dmanr <= 3) dma_outb(0, DMA1_CLEAR_FF_REG); @@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr) } /* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +static inline void set_dma_mode(unsigned int dmanr, char mode) { if (dmanr <= 3) dma_outb(mode | dmanr, DMA1_MODE_REG); @@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) * the lower 16 bits of the DMA current address register, but a 64k boundary * may have been crossed. */ -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) +static inline void set_dma_page(unsigned int dmanr, char pagenr) { switch (dmanr) { case 0: @@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) /* Set transfer address & page bits for specific DMA channel. * Assumes dma flipflop is clear. */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) { set_dma_page(dmanr, a>>16); if (dmanr <= 3) { dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); } else { - dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); - dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); } } @@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) * Assumes dma flip-flop is clear. * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +static inline void set_dma_count(unsigned int dmanr, unsigned int count) { count--; if (dmanr <= 3) { - dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); - dma_outb((count >> 8) & 0xff, - ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); } else { - dma_outb((count >> 1) & 0xff, - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); - dma_outb((count >> 9) & 0xff, - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 1) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); } } @@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) * * Assumes DMA flip-flop is clear. 
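[Editor's note: the helpers in this header are meant to be called as one sequence, with the channel masked and the dma_spin_lock held, because the flip-flop state is shared hardware. A hedged sketch of the canonical driver sequence built from the functions above; this is kernel-context pseudo-usage, not a buildable user-space program:]

/* Program an ISA DMA channel for one transfer (canonical order). */
static void program_isa_dma(unsigned int chan, char *buf,
			    unsigned int len, char mode)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);		/* mask while reprogramming   */
	clear_dma_ff(chan);		/* flip-flop must start clear */
	set_dma_mode(chan, mode);	/* e.g. DMA_MODE_READ         */
	set_dma_addr(chan, isa_virt_to_bus(buf));
	set_dma_count(chan, len);
	enable_dma(chan);

	release_dma_lock(flags);
}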
*/ -static __inline__ int get_dma_residue(unsigned int dmanr) +static inline int get_dma_residue(unsigned int dmanr) { unsigned int io_port; /* using short to get 16-bit wrap around */ -- cgit v1.2.1 From c884534f7e74e017f96f81efc31f26ef2b15572e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:03 -0700 Subject: include/asm-x86/dwarf2_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/dwarf2_64.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h index eedc08526b0b..c950519a264d 100644 --- a/include/asm-x86/dwarf2_64.h +++ b/include/asm-x86/dwarf2_64.h @@ -1,16 +1,15 @@ #ifndef _DWARF2_H #define _DWARF2_H 1 - #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif -/* +/* Macros for dwarf2 CFI unwind table entries. - See "as.info" for details on these pseudo ops. Unfortunately - they are only supported in very new binutils, so define them - away for older version. + See "as.info" for details on these pseudo ops. Unfortunately + they are only supported in very new binutils, so define them + away for older version. */ #ifdef CONFIG_AS_CFI -- cgit v1.2.1 From 1257d6e0408ba21d72dc3db9ef69d7287c74507b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:04 -0700 Subject: include/asm-x86/e820_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/e820_32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index e7207a6de3e0..43b1a8bd4b34 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h @@ -34,8 +34,8 @@ extern void e820_register_memory(void); extern void limit_regions(unsigned long long size); extern void print_memory_map(char *who); extern void init_iomem_resources(struct resource *code_resource, - struct resource *data_resource, - struct resource *bss_resource); + struct resource *data_resource, + struct resource *bss_resource); #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) extern void e820_mark_nosave_regions(void); -- cgit v1.2.1 From 86bbc83526a4543db0a4ffed8e4cf679eddd534c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:05 -0700 Subject: include/asm-x86/e820_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/e820_64.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index d38820b31c1c..f478c57eb060 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h @@ -14,22 +14,24 @@ #include #ifndef __ASSEMBLY__ -extern unsigned long find_e820_area(unsigned long start, unsigned long end, +extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned long size, unsigned long align); extern unsigned long find_e820_area_size(unsigned long start, unsigned long *sizep, unsigned long align); -extern void add_memory_region(unsigned long start, unsigned long size, +extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void update_memory_range(u64 start, u64 size, unsigned old_type, unsigned new_type); extern void setup_memory_region(void); -extern void contig_e820_setup(void); +extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); extern 
void e820_reserve_resources(void); extern void e820_mark_nosave_regions(void); -extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); -extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); +extern int e820_any_mapped(unsigned long start, unsigned long end, + unsigned type); +extern int e820_all_mapped(unsigned long start, unsigned long end, + unsigned type); extern int e820_any_non_reserved(unsigned long start, unsigned long end); extern int is_memory_any_valid(unsigned long start, unsigned long end); extern int e820_all_non_reserved(unsigned long start, unsigned long end); @@ -37,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end); extern unsigned long e820_hole_size(unsigned long start, unsigned long end); extern void e820_setup_gap(void); -extern void e820_register_active_regions(int nid, - unsigned long start_pfn, unsigned long end_pfn); +extern void e820_register_active_regions(int nid, unsigned long start_pfn, + unsigned long end_pfn); extern void finish_e820_parsing(void); -- cgit v1.2.1 From 451dd9835898d4cc3c0ee8f9e4883807b760eb02 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:06 -0700 Subject: include/asm-x86/edac.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/edac.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h index cf3200a745ad..a8088f63a30e 100644 --- a/include/asm-x86/edac.h +++ b/include/asm-x86/edac.h @@ -3,7 +3,7 @@ /* ECC atomic, DMA, SMP and interrupt safe scrub function */ -static __inline__ void atomic_scrub(void *va, u32 size) +static inline void atomic_scrub(void *va, u32 size) { u32 i, *virt_addr = va; @@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size) * are interrupt, DMA and SMP safe. */ for (i = 0; i < size / 4; i++, virt_addr++) - __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); + asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); } #endif -- cgit v1.2.1 From 0c6f6abf6e63227b1e6e22e412c3f63c0163a520 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:07 -0700 Subject: include/asm-x86/efi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/efi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index ea9734b74aca..d53004b855cc 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h @@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); */ #define efi_call_virt(f, args...) 
\ - ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) + ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) #define efi_call_virt0(f) efi_call_virt(f) #define efi_call_virt1(f, a1) efi_call_virt(f, a1) -- cgit v1.2.1 From 486386f6c1b5144ccc8eb2f28def1712e1dd6c3d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:08 -0700 Subject: include/asm-x86/elf.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/elf.h | 107 ++++++++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 47 deletions(-) diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index 77325646e2f4..8f232dc5b5fe 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h @@ -11,7 +11,7 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; @@ -100,10 +100,11 @@ extern unsigned int vdso_enabled; We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. */ -#define ELF_PLAT_INIT(_r, load_addr) do { \ - _r->bx = 0; _r->cx = 0; _r->dx = 0; \ - _r->si = 0; _r->di = 0; _r->bp = 0; \ - _r->ax = 0; \ +#define ELF_PLAT_INIT(_r, load_addr) \ + do { \ + _r->bx = 0; _r->cx = 0; _r->dx = 0; \ + _r->si = 0; _r->di = 0; _r->bp = 0; \ + _r->ax = 0; \ } while (0) /* @@ -111,24 +112,25 @@ extern unsigned int vdso_enabled; * now struct_user_regs, they are different) */ -#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ - pr_reg[0] = regs->bx; \ - pr_reg[1] = regs->cx; \ - pr_reg[2] = regs->dx; \ - pr_reg[3] = regs->si; \ - pr_reg[4] = regs->di; \ - pr_reg[5] = regs->bp; \ - pr_reg[6] = regs->ax; \ - pr_reg[7] = regs->ds & 0xffff; \ - pr_reg[8] = regs->es & 0xffff; \ - pr_reg[9] = regs->fs & 0xffff; \ - savesegment(gs, pr_reg[10]); \ - pr_reg[11] = regs->orig_ax; \ - pr_reg[12] = regs->ip; \ - pr_reg[13] = regs->cs & 0xffff; \ - pr_reg[14] = regs->flags; \ - pr_reg[15] = regs->sp; \ - pr_reg[16] = regs->ss & 0xffff; \ +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +do { \ + pr_reg[0] = regs->bx; \ + pr_reg[1] = regs->cx; \ + pr_reg[2] = regs->dx; \ + pr_reg[3] = regs->si; \ + pr_reg[4] = regs->di; \ + pr_reg[5] = regs->bp; \ + pr_reg[6] = regs->ax; \ + pr_reg[7] = regs->ds & 0xffff; \ + pr_reg[8] = regs->es & 0xffff; \ + pr_reg[9] = regs->fs & 0xffff; \ + savesegment(gs, pr_reg[10]); \ + pr_reg[11] = regs->orig_ax; \ + pr_reg[12] = regs->ip; \ + pr_reg[13] = regs->cs & 0xffff; \ + pr_reg[14] = regs->flags; \ + pr_reg[15] = regs->sp; \ + pr_reg[16] = regs->ss & 0xffff; \ } while (0); #define ELF_PLATFORM (utsname()->machine) @@ -139,7 +141,7 @@ extern unsigned int vdso_enabled; /* * This is used to ensure we don't load something for the wrong architecture. 
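[Editor's note: elf_check_arch() below boils down to comparing the ELF header's e_machine field against EM_X86_64. The same check can be made from user space on a file; a hedged sketch using the glibc <elf.h> types:]

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Return 1 if 'path' starts with an x86-64 ELF header, else 0. */
static int is_x86_64_elf(const char *path)
{
	Elf64_Ehdr eh;
	FILE *f = fopen(path, "rb");
	int ok = 0;

	if (f) {
		if (fread(&eh, sizeof(eh), 1, f) == 1)
			ok = !memcmp(eh.e_ident, ELFMAG, SELFMAG) &&
			     eh.e_machine == EM_X86_64;
		fclose(f);
	}
	return ok;
}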
*/ -#define elf_check_arch(x) \ +#define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) #define compat_elf_check_arch(x) elf_check_arch_ia32(x) @@ -168,24 +170,30 @@ static inline void elf_common_init(struct thread_struct *t, t->ds = t->es = ds; } -#define ELF_PLAT_INIT(_r, load_addr) do { \ - elf_common_init(¤t->thread, _r, 0); \ - clear_thread_flag(TIF_IA32); \ +#define ELF_PLAT_INIT(_r, load_addr) \ +do { \ + elf_common_init(¤t->thread, _r, 0); \ + clear_thread_flag(TIF_IA32); \ } while (0) -#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ +#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) -#define compat_start_thread(regs, ip, sp) do { \ - start_ia32_thread(regs, ip, sp); \ - set_fs(USER_DS); \ - } while (0) -#define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ - if (test_thread_flag(TIF_IA32)) \ - clear_thread_flag(TIF_ABI_PENDING); \ - else \ - set_thread_flag(TIF_ABI_PENDING); \ - current->personality |= force_personality32; \ - } while (0) + +#define compat_start_thread(regs, ip, sp) \ +do { \ + start_ia32_thread(regs, ip, sp); \ + set_fs(USER_DS); \ +} while (0) + +#define COMPAT_SET_PERSONALITY(ex, ibcs2) \ +do { \ + if (test_thread_flag(TIF_IA32)) \ + clear_thread_flag(TIF_ABI_PENDING); \ + else \ + set_thread_flag(TIF_ABI_PENDING); \ + current->personality |= force_personality32; \ +} while (0) + #define COMPAT_ELF_PLATFORM ("i686") /* @@ -194,7 +202,8 @@ static inline void elf_common_init(struct thread_struct *t, * getting dumped. */ -#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ +#define ELF_CORE_COPY_REGS(pr_reg, regs) \ +do { \ unsigned v; \ (pr_reg)[0] = (regs)->r15; \ (pr_reg)[1] = (regs)->r14; \ @@ -268,10 +277,12 @@ extern int force_personality32; struct task_struct; -#define ARCH_DLINFO_IA32(vdso_enabled) \ -do if (vdso_enabled) { \ +#define ARCH_DLINFO_IA32(vdso_enabled) \ +do { \ + if (vdso_enabled) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ + } \ } while (0) #ifdef CONFIG_X86_32 @@ -289,9 +300,11 @@ do if (vdso_enabled) { \ /* 1GB for 64bit, 8MB for 32bit */ #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 
0x7ff : 0x3fffff) -#define ARCH_DLINFO \ -do if (vdso_enabled) { \ - NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ +#define ARCH_DLINFO \ +do { \ + if (vdso_enabled) \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (unsigned long)current->mm->context.vdso); \ } while (0) #define AT_SYSINFO 32 @@ -304,8 +317,8 @@ do if (vdso_enabled) { \ #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) -#define VDSO_ENTRY \ - ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +#define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) struct linux_binprm; -- cgit v1.2.1 From cb7d0617b776b91a0643b8c5988f236414129b7d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:09 -0700 Subject: include/asm-x86/fixmap_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/fixmap_32.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index a7404d50686b..eb1665125c44 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h @@ -99,8 +99,7 @@ enum fixed_addresses { */ #define NR_FIX_BTMAPS 64 #define FIX_BTMAPS_NESTING 4 - FIX_BTMAP_END = - __end_of_permanent_fixed_addresses + 512 - + FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - (__end_of_permanent_fixed_addresses & 511), FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, FIX_WP_TEST, @@ -110,20 +109,20 @@ enum fixed_addresses { __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); +extern void __set_fixmap(enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); extern void reserve_top_address(unsigned long reserve); -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. 
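[Editor's note: a hedged sketch of how a driver typically consumes the fixmap macros defined in this header -- FIX_MYDEV is a hypothetical slot that real code would first have to add to enum fixed_addresses:]

/* Map one page of device registers through a fixmap slot, uncached. */
static void __iomem *map_mydev(unsigned long mmio_phys)
{
	set_fixmap_nocache(FIX_MYDEV, mmio_phys);
	return (void __iomem *)fix_to_virt(FIX_MYDEV);
}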
*/ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) -#define clear_fixmap(idx) \ - __set_fixmap(idx, 0, __pgprot(0)) +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) @@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); - return __fix_to_virt(idx); + return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) -- cgit v1.2.1 From dca13fb0fc89d4214090a0ae14aeeaa50c42a6c4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:10 -0700 Subject: include/asm-x86/fixmap_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/fixmap_64.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index 70ddb21e6458..f3d76858c0e6 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h @@ -34,32 +34,34 @@ enum fixed_addresses { VSYSCALL_LAST_PAGE, - VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, + VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, FIX_HPET_BASE, FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, FIX_EFI_IO_MAP_LAST_PAGE, - FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, + FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE + + MAX_EFI_IO_PAGES - 1, #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT FIX_OHCI1394_BASE, #endif __end_of_fixed_addresses }; -extern void __set_fixmap (enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); +extern void __set_fixmap(enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -- cgit v1.2.1 From 4637bc07c85621b0c10320da8cf3b34de83efa0f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:11 -0700 Subject: include/asm-x86/floppy.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/floppy.h | 87 +++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index a48d7153c097..438b3033a250 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h @@ -20,20 +20,21 @@ * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. 
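[Editor's note: _CROSS_64KB() below encodes the classic ISA DMA restriction that a transfer may not cross a 64 KiB physical boundary, since the controller's address counter is only 16 bits wide. An equivalent, runnable plain-C formulation:]

#include <stdbool.h>
#include <stdio.h>

/* True if [addr, addr + size - 1] spans a 64 KiB boundary. */
static bool crosses_64k(unsigned long addr, unsigned long size)
{
	return (addr / 0x10000) != ((addr + size - 1) / 0x10000);
}

int main(void)
{
	printf("%d\n", crosses_64k(0xfff0, 0x20));	/* 1: crosses      */
	printf("%d\n", crosses_64k(0x10000, 0x100));	/* 0: fits in bank */
	return 0;
}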
*/ -#define _CROSS_64KB(a,s,vdma) \ -(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) +#define _CROSS_64KB(a, s, vdma) \ + (!(vdma) && \ + ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) -#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) +#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1) -#define SW fd_routine[use_virtual_dma&1] +#define SW fd_routine[use_virtual_dma & 1] #define CSW fd_routine[can_use_virtual_dma & 1] #define fd_inb(port) inb_p(port) -#define fd_outb(value,port) outb_p(value,port) +#define fd_outb(value, port) outb_p(value, port) -#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") +#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) @@ -57,15 +58,15 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) #undef TRACE_FLPY_INT #ifdef TRACE_FLPY_INT - static int calls=0; - static int bytes=0; - static int dma_wait=0; + static int calls; + static int bytes; + static int dma_wait; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id); #ifdef TRACE_FLPY_INT - if(!calls) + if (!calls) bytes = virtual_dma_count; #endif @@ -74,42 +75,42 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) register char *lptr; st = 1; - for(lcount=virtual_dma_count, lptr=virtual_dma_addr; - lcount; lcount--, lptr++) { - st=inb(virtual_dma_port+4) & 0xa0 ; - if(st != 0xa0) + for (lcount = virtual_dma_count, lptr = virtual_dma_addr; + lcount; lcount--, lptr++) { + st = inb(virtual_dma_port + 4) & 0xa0; + if (st != 0xa0) break; - if(virtual_dma_mode) - outb_p(*lptr, virtual_dma_port+5); + if (virtual_dma_mode) + outb_p(*lptr, virtual_dma_port + 5); else - *lptr = inb_p(virtual_dma_port+5); + *lptr = inb_p(virtual_dma_port + 5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; - st = inb(virtual_dma_port+4); + st = inb(virtual_dma_port + 4); } #ifdef TRACE_FLPY_INT calls++; #endif - if(st == 0x20) + if (st == 0x20) return IRQ_HANDLED; - if(!(st & 0x20)) { + if (!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; + virtual_dma_count = 0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; - dma_wait=0; + dma_wait = 0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT - if(!virtual_dma_count) + if (!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; @@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) static void fd_disable_dma(void) { - if(! 
(can_use_virtual_dma & 1)) + if (!(can_use_virtual_dma & 1)) disable_dma(FLOPPY_DMA); doing_pdma = 0; virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; + virtual_dma_count = 0; } -static int vdma_request_dma(unsigned int dmanr, const char * device_id) +static int vdma_request_dma(unsigned int dmanr, const char *device_id) { return 0; } @@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { - if(can_use_virtual_dma) + if (can_use_virtual_dma) return request_irq(FLOPPY_IRQ, floppy_hardint, IRQF_DISABLED, "floppy", NULL); else @@ -152,13 +153,13 @@ static int fd_request_irq(void) static unsigned long dma_mem_alloc(unsigned long size) { - return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size)); + return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size)); } static unsigned long vdma_mem_alloc(unsigned long size) { - return (unsigned long) vmalloc(size); + return (unsigned long)vmalloc(size); } @@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size) static void _fd_dma_mem_free(unsigned long addr, unsigned long size) { - if((unsigned long) addr >= (unsigned long) high_memory) + if ((unsigned long)addr >= (unsigned long)high_memory) vfree((void *)addr); else free_pages(addr, get_order(size)); @@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size) static void _fd_chose_dma_mode(char *addr, unsigned long size) { - if(can_use_virtual_dma == 2) { - if((unsigned long) addr >= (unsigned long) high_memory || - isa_virt_to_bus(addr) >= 0x1000000 || - _CROSS_64KB(addr, size, 0)) + if (can_use_virtual_dma == 2) { + if ((unsigned long)addr >= (unsigned long)high_memory || + isa_virt_to_bus(addr) >= 0x1000000 || + _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else use_virtual_dma = 0; @@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; - virtual_dma_mode = (mode == DMA_MODE_WRITE); + virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; @@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) /* actual, physical DMA */ doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); - set_dma_mode(FLOPPY_DMA,mode); - set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); - set_dma_count(FLOPPY_DMA,size); + set_dma_mode(FLOPPY_DMA, mode); + set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr)); + set_dma_count(FLOPPY_DMA, size); enable_dma(FLOPPY_DMA); return 0; } static struct fd_routine_l { - int (*_request_dma)(unsigned int dmanr, const char * device_id); + int (*_request_dma)(unsigned int dmanr, const char *device_id); void (*_free_dma)(unsigned int dmanr); int (*_get_dma_residue)(unsigned int dummy); - unsigned long (*_dma_mem_alloc) (unsigned long size); + unsigned long (*_dma_mem_alloc)(unsigned long size); int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { @@ -252,7 +253,8 @@ static int FDC2 = -1; * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
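[Editor's note: the FLOPPY0_TYPE/FLOPPY1_TYPE macros below read CMOS register 0x10, which packs both drive types into one byte -- high nibble for drive 0, low nibble for drive 1; the >> 4 and & 15 extraction sits in context lines this hunk does not show. A hedged sketch of the decode step alone:]

/* Decode one drive's type nibble from CMOS register 0x10. */
static unsigned char floppy_type(unsigned char cmos10, int drive)
{
	return drive == 0 ? (cmos10 >> 4) & 0x0f	/* first drive  */
			  : cmos10 & 0x0f;		/* second drive */
}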
*/ -#define FLOPPY0_TYPE ({ \ +#define FLOPPY0_TYPE \ +({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ @@ -261,7 +263,8 @@ static int FDC2 = -1; val; \ }) -#define FLOPPY1_TYPE ({ \ +#define FLOPPY1_TYPE \ +({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ -- cgit v1.2.1 From 9407913fc16dde0e632b9639557422c6a792469d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:12 -0700 Subject: include/asm-x86/futex.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/futex.h | 101 ++++++++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 46 deletions(-) diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index c9952ea9f698..ac0fbf24d722 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h @@ -12,35 +12,32 @@ #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile( \ -"1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n \ -3: mov %3, %1\n \ - jmp 2b\n \ - .previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ - : "i" (-EFAULT), "0" (oparg), "1" (0)) + asm volatile("1:\t" insn "\n" \ + "2:\t.section .fixup,\"ax\"\n" \ + "3:\tmov\t%3, %1\n" \ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile( \ -"1: movl %2, %0\n \ - movl %0, %3\n" \ - insn "\n" \ -"2: lock; cmpxchgl %3, %2\n \ - jnz 1b\n \ -3: .section .fixup,\"ax\"\n \ -4: mov %5, %1\n \ - jmp 3b\n \ - .previous\n" \ - _ASM_EXTABLE(1b,4b) \ - _ASM_EXTABLE(2b,4b) \ - : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ - "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) - -static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) + asm volatile("1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ + "2:\tlock; cmpxchgl %3, %2\n" \ + "\tjnz\t1b\n" \ + "3:\t.section .fixup,\"ax\"\n" \ + "4:\tmov\t%5, %1\n" \ + "\tjmp\t3b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ + "+m" (*uaddr), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (!ret) { switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; + case FUTEX_OP_CMP_EQ: + ret = (oldval == cmparg); + break; + case FUTEX_OP_CMP_NE: + ret = (oldval != cmparg); + break; + case FUTEX_OP_CMP_LT: + ret = (oldval < cmparg); + break; + case FUTEX_OP_CMP_GE: + ret = (oldval >= cmparg); + break; + case FUTEX_OP_CMP_LE: + ret = (oldval <= cmparg); + break; + case FUTEX_OP_CMP_GT: + ret = (oldval > cmparg); + break; + default: + ret = -ENOSYS; } } return ret; } -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int 
oldval, + int newval) { #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) @@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - __asm__ __volatile__( - "1: lock; cmpxchgl %3, %1 \n" - "2: .section .fixup, \"ax\" \n" - "3: mov %2, %0 \n" - " jmp 2b \n" - " .previous \n" - _ASM_EXTABLE(1b,3b) - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) - : "memory" + asm volatile("1:\tlock; cmpxchgl %3, %1\n" + "2:\t.section .fixup, \"ax\"\n" + "3:\tmov %2, %0\n" + "\tjmp 2b\n" + "\t.previous\n" + _ASM_EXTABLE(1b, 3b) + : "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "memory" ); return oldval; -- cgit v1.2.1 From eee28c251ed8ef2c6b66f0e08e9467a8cc5cf886 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:13 -0700 Subject: include/asm-x86/genapic_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/genapic_32.h | 89 ++++++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 44 deletions(-) diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index b501ae7809ba..5fa893dce729 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -18,18 +18,18 @@ struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; -struct genapic { - char *name; - int (*probe)(void); +struct genapic { + char *name; + int (*probe)(void); int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); int int_delivery_mode; - int int_dest_mode; + int int_dest_mode; int ESR_DISABLE; int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); + unsigned long (*check_apicid_present)(int apicid); int no_balance_irq; int no_ioapic_check; void (*init_apic_ldr)(void); @@ -37,21 +37,21 @@ struct genapic { void (*setup_apic_routing)(void); int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); + int (*apicid_to_node)(int logical_apicid); int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - void (*setup_portio_remap)(void); + void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ /* When one of the next two hooks returns 1 the genapic - is switched to this. Essentially they are additional probe + is switched to this. Essentially they are additional probe functions. 
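[Editor's note: struct genapic is an ops table selected at boot by walking a probe array; a hedged sketch of that selection loop, with apic_probe[] standing in for the table the mach-generic probe code builds from APIC_INIT() entries -- the function name here is assumed for illustration:]

/* Pick the first APIC driver whose probe() accepts this machine. */
static void probe_genapic(struct genapic **apic_probe)
{
	int i;

	for (i = 0; apic_probe[i]; i++) {
		if (apic_probe[i]->probe()) {
			genapic = apic_probe[i];
			break;
		}
	}
}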
*/ - int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, - char *productid); + int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, + char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); unsigned (*get_apic_id)(unsigned long x); @@ -64,7 +64,7 @@ struct genapic { void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); #endif -}; +}; #define APICFUNC(x) .x = x, @@ -77,39 +77,40 @@ struct genapic { #define IPIFUNC(x) #endif -#define APIC_INIT(aname, aprobe) { \ - .name = aname, \ - .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered) \ - APICFUNC(target_cpus) \ - APICFUNC(check_apicid_used) \ - APICFUNC(check_apicid_present) \ - APICFUNC(init_apic_ldr) \ - APICFUNC(ioapic_phys_id_map) \ - APICFUNC(setup_apic_routing) \ - APICFUNC(multi_timer_check) \ - APICFUNC(apicid_to_node) \ - APICFUNC(cpu_to_logical_apicid) \ - APICFUNC(cpu_present_to_apicid) \ - APICFUNC(apicid_to_cpu_present) \ - APICFUNC(setup_portio_remap) \ - APICFUNC(check_phys_apicid_present) \ - APICFUNC(mps_oem_check) \ - APICFUNC(get_apic_id) \ - .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(acpi_madt_oem_check) \ - IPIFUNC(send_IPI_mask) \ - IPIFUNC(send_IPI_allbutself) \ - IPIFUNC(send_IPI_all) \ - APICFUNC(enable_apic_mode) \ - APICFUNC(phys_pkg_id) \ - } +#define APIC_INIT(aname, aprobe) \ +{ \ + .name = aname, \ + .probe = aprobe, \ + .int_delivery_mode = INT_DELIVERY_MODE, \ + .int_dest_mode = INT_DEST_MODE, \ + .no_balance_irq = NO_BALANCE_IRQ, \ + .ESR_DISABLE = esr_disable, \ + .apic_destination_logical = APIC_DEST_LOGICAL, \ + APICFUNC(apic_id_registered) \ + APICFUNC(target_cpus) \ + APICFUNC(check_apicid_used) \ + APICFUNC(check_apicid_present) \ + APICFUNC(init_apic_ldr) \ + APICFUNC(ioapic_phys_id_map) \ + APICFUNC(setup_apic_routing) \ + APICFUNC(multi_timer_check) \ + APICFUNC(apicid_to_node) \ + APICFUNC(cpu_to_logical_apicid) \ + APICFUNC(cpu_present_to_apicid) \ + APICFUNC(apicid_to_cpu_present) \ + APICFUNC(setup_portio_remap) \ + APICFUNC(check_phys_apicid_present) \ + APICFUNC(mps_oem_check) \ + APICFUNC(get_apic_id) \ + .apic_id_mask = APIC_ID_MASK, \ + APICFUNC(cpu_mask_to_apicid) \ + APICFUNC(acpi_madt_oem_check) \ + IPIFUNC(send_IPI_mask) \ + IPIFUNC(send_IPI_allbutself) \ + IPIFUNC(send_IPI_all) \ + APICFUNC(enable_apic_mode) \ + APICFUNC(phys_pkg_id) \ +} extern struct genapic *genapic; -- cgit v1.2.1 From 6394d982a96a4b8f7973dea1d9d5a99dbe1847f7 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:14 -0700 Subject: include/asm-x86/geode.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/geode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 9e7280092a48..9870cc1f2f8f 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@ -167,7 +167,7 @@ static inline int is_geode(void) /* MFGPTs */ #define MFGPT_MAX_TIMERS 8 -#define MFGPT_TIMER_ANY -1 +#define MFGPT_TIMER_ANY (-1) #define MFGPT_DOMAIN_WORKING 1 #define MFGPT_DOMAIN_STANDBY 2 -- cgit v1.2.1 From b9c2fcf0eb6190f942312a6e04f91a80efb91b72 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:15 -0700 Subject: include/asm-x86/highmem.h: checkpatch cleanups - formatting only 
Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/highmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 479767c9195f..e153f3b44774 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h @@ -8,7 +8,7 @@ * Gerhard.Wichert@pdb.siemens.de * * - * Redesigned the x86 32-bit VM architecture to deal with + * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * -- cgit v1.2.1 From dbdab9f6308645a4462e81c607d6282bcb27141d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:16 -0700 Subject: include/asm-x86/hw_irq_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/hw_irq_64.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index 312a58d6dac6..0062ef390f67 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h @@ -36,7 +36,7 @@ * cleanup after irq migration. */ #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR - + /* * Vectors 0x30-0x3f are used for ISA interrupts. */ @@ -159,13 +159,12 @@ extern atomic_t irq_mis_count; * SMP has a few special interrupts for IPI messages */ -#define BUILD_IRQ(nr) \ -asmlinkage void IRQ_NAME(nr); \ -__asm__( \ -"\n.p2align\n" \ -"IRQ" #nr "_interrupt:\n\t" \ - "push $~(" #nr ") ; " \ - "jmp common_interrupt"); +#define BUILD_IRQ(nr) \ + asmlinkage void IRQ_NAME(nr); \ + asm("\n.p2align\n" \ + "IRQ" #nr "_interrupt:\n\t" \ + "push $~(" #nr ") ; " \ + "jmp common_interrupt"); #define platform_legacy_irq(irq) ((irq) < 16) -- cgit v1.2.1 From e574b023cf03f796727ea654acfc16a0df805086 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:17 -0700 Subject: include/asm-x86/hypertransport.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/hypertransport.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h index c16c6ff4bdd7..d2bbd238b3e1 100644 --- a/include/asm-x86/hypertransport.h +++ b/include/asm-x86/hypertransport.h @@ -8,12 +8,14 @@ #define HT_IRQ_LOW_BASE 0xf8000000 #define HT_IRQ_LOW_VECTOR_SHIFT 16 -#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 -#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) +#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 +#define HT_IRQ_LOW_VECTOR(v) \ + (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) #define HT_IRQ_LOW_DEST_ID_SHIFT 8 -#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 -#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) +#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 +#define HT_IRQ_LOW_DEST_ID(v) \ + (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 @@ -36,7 +38,8 @@ #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 -#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff -#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) +#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff +#define HT_IRQ_HIGH_DEST_ID(v) \ + ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) #endif /* ASM_HYPERTRANSPORT_H */ -- cgit v1.2.1 From 
affe66374ca16572fbb22cefe267d6072c49ce9d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:18 -0700 Subject: include/asm-x86/i387.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/i387.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index f377b76b2f34..54522b814f1c 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -41,7 +41,7 @@ static inline void tolerant_fwait(void) { asm volatile("1: fwait\n" "2:\n" - _ASM_EXTABLE(1b,2b)); + _ASM_EXTABLE(1b, 2b)); } static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) @@ -54,7 +54,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) + _ASM_EXTABLE(1b, 3b) : [err] "=r" (err) #if 0 /* See comment in __save_init_fpu() below. */ : [fx] "r" (fx), "m" (*fx), "0" (0)); @@ -76,11 +76,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) static inline void clear_fpu_state(struct i387_fxsave_struct *fx) { if (unlikely(fx->swd & X87_FSW_ES)) - asm volatile("fnclex"); + asm volatile("fnclex"); alternative_input(ASM_NOP8 ASM_NOP2, - " emms\n" /* clear stack tags */ - " fildl %%gs:0", /* load to clear state */ - X86_FEATURE_FXSAVE_LEAK); + " emms\n" /* clear stack tags */ + " fildl %%gs:0", /* load to clear state */ + X86_FEATURE_FXSAVE_LEAK); } static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) @@ -93,14 +93,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) + _ASM_EXTABLE(1b, 3b) : [err] "=r" (err), "=m" (*fx) #if 0 /* See comment in __fxsave_clear() below. 
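[Editor's note: the 1:/2:/3: labels plus .fixup and _ASM_EXTABLE used throughout this file are the kernel's standard "this instruction may fault" idiom: if the instruction at label 1 faults, the exception table redirects to label 3 in the .fixup section, which records -EFAULT and resumes at label 2. A hedged, minimal shape of the pattern (kernel-context sketch, not buildable in user space):]

static inline int get_user_int(int *val, const int __user *uptr)
{
	int err = 0;

	asm volatile("1:	movl %2, %0\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3, %1\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "=r" (*val), "+r" (err)
		     : "m" (*uptr), "i" (-EFAULT));

	return err;	/* 0 on success, -EFAULT if the load faulted */
}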
*/ : [fx] "r" (fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "0" (0)); #endif - if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) + if (unlikely(err) && + __clear_user(fx, sizeof(struct i387_fxsave_struct))) err = -EFAULT; /* No need to clear here because the caller clears USED_MATH */ return err; @@ -156,8 +157,10 @@ static inline int save_i387(struct _fpstate __user *buf) return 0; clear_used_math(); /* trigger finit */ if (task_thread_info(tsk)->status & TS_USEDFPU) { - err = save_i387_checking((struct i387_fxsave_struct __user *)buf); - if (err) return err; + err = save_i387_checking((struct i387_fxsave_struct __user *) + buf); + if (err) + return err; task_thread_info(tsk)->status &= ~TS_USEDFPU; stts(); } else { -- cgit v1.2.1 From ace4fdbb2fa842fc557bc0ae5909d790eb335ff8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:19 -0700 Subject: include/asm-x86/i8259.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/i8259.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index e2650f21ca8d..45d4df3e51e6 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h @@ -5,7 +5,7 @@ extern unsigned int cached_irq_mask; -#define __byte(x,y) (((unsigned char *) &(y))[x]) +#define __byte(x, y) (((unsigned char *)&(y))[x]) #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) -- cgit v1.2.1 From 9bd73425142a610ae72c5e2b89e036e5214bb6ca Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:20 -0700 Subject: include/asm-x86/ia32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ia32.h | 62 ++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index aa9733206e29..55d3abe5276f 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h @@ -14,19 +14,19 @@ /* signal.h */ struct sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal - with 32 bits */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ - compat_sigset_t sa_mask; /* A 32 bit mask */ + unsigned int sa_handler; /* Really a pointer, but need to deal + with 32 bits */ + unsigned int sa_flags; + unsigned int sa_restorer; /* Another 32 bit pointer */ + compat_sigset_t sa_mask; /* A 32 bit mask */ }; struct old_sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal - with 32 bits */ - compat_old_sigset_t sa_mask; /* A 32 bit mask */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ + unsigned int sa_handler; /* Really a pointer, but need to deal + with 32 bits */ + compat_old_sigset_t sa_mask; /* A 32 bit mask */ + unsigned int sa_flags; + unsigned int sa_restorer; /* Another 32 bit pointer */ }; typedef struct sigaltstack_ia32 { @@ -65,7 +65,7 @@ struct stat64 { long long st_size; unsigned int st_blksize; - long long st_blocks;/* Number 512-byte blocks allocated. 
*/ + long long st_blocks;/* Number 512-byte blocks allocated */ unsigned st_atime; unsigned st_atime_nsec; @@ -77,13 +77,13 @@ struct stat64 { unsigned long long st_ino; } __attribute__((packed)); -typedef struct compat_siginfo{ +typedef struct compat_siginfo { int si_signo; int si_errno; int si_code; union { - int _pad[((128/sizeof(int)) - 3)]; + int _pad[((128 / sizeof(int)) - 3)]; /* kill() */ struct { @@ -129,28 +129,26 @@ typedef struct compat_siginfo{ } _sifields; } compat_siginfo_t; -struct sigframe32 -{ - u32 pretcode; - int sig; - struct sigcontext_ia32 sc; - struct _fpstate_ia32 fpstate; - unsigned int extramask[_COMPAT_NSIG_WORDS-1]; +struct sigframe32 { + u32 pretcode; + int sig; + struct sigcontext_ia32 sc; + struct _fpstate_ia32 fpstate; + unsigned int extramask[_COMPAT_NSIG_WORDS-1]; }; -struct rt_sigframe32 -{ - u32 pretcode; - int sig; - u32 pinfo; - u32 puc; - compat_siginfo_t info; - struct ucontext_ia32 uc; - struct _fpstate_ia32 fpstate; +struct rt_sigframe32 { + u32 pretcode; + int sig; + u32 pinfo; + u32 puc; + compat_siginfo_t info; + struct ucontext_ia32 uc; + struct _fpstate_ia32 fpstate; }; struct ustat32 { - __u32 f_tfree; + __u32 f_tfree; compat_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; @@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm); #endif #endif /* !CONFIG_IA32_SUPPORT */ - -#endif + +#endif -- cgit v1.2.1 From dcd215c9089c1203eed6f84dde2eb2cc0bbf9501 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:22 -0700 Subject: include/asm-x86/io_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io_32.h | 137 +++++++++++++++++++++++++++++------------------- 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index d4d8fbd9378c..509045f5fda2 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h @@ -65,14 +65,14 @@ * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on - * addresses directly mapped or allocated via kmalloc. + * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. 
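 *
 * A minimal usage sketch (illustrative only -- the buffer and its
 * size are invented for the example, they are not part of this
 * header):
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = buf ? virt_to_phys(buf) : 0;
 *	BUG_ON(buf && phys_to_virt(phys) != buf);
 *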
In * almost all conceivable cases a device driver should not be using * this function */ - -static inline unsigned long virt_to_phys(volatile void * address) + +static inline unsigned long virt_to_phys(volatile void *address) { return __pa(address); } @@ -90,7 +90,7 @@ static inline unsigned long virt_to_phys(volatile void * address) * this function */ -static inline void * phys_to_virt(unsigned long address) +static inline void *phys_to_virt(unsigned long address) { return __va(address); } @@ -169,16 +169,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); static inline unsigned char readb(const volatile void __iomem *addr) { - return *(volatile unsigned char __force *) addr; + return *(volatile unsigned char __force *)addr; } + static inline unsigned short readw(const volatile void __iomem *addr) { - return *(volatile unsigned short __force *) addr; + return *(volatile unsigned short __force *)addr; } + static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } + #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) @@ -188,15 +191,17 @@ static inline unsigned int readl(const volatile void __iomem *addr) static inline void writeb(unsigned char b, volatile void __iomem *addr) { - *(volatile unsigned char __force *) addr = b; + *(volatile unsigned char __force *)addr = b; } + static inline void writew(unsigned short b, volatile void __iomem *addr) { - *(volatile unsigned short __force *) addr = b; + *(volatile unsigned short __force *)addr = b; } + static inline void writel(unsigned int b, volatile void __iomem *addr) { - *(volatile unsigned int __force *) addr = b; + *(volatile unsigned int __force *)addr = b; } #define __raw_writeb writeb #define __raw_writew writew @@ -239,12 +244,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count) * 1. Out of order aware processors * 2. 
Accidentally out of order processors (PPro errata #51) */ - + #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { - __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); + asm volatile("lock; addl $0,0(%%esp)": : :"memory"); } #else @@ -264,7 +269,8 @@ extern void io_delay_init(void); #include #else -static inline void slow_down_io(void) { +static inline void slow_down_io(void) +{ native_io_delay(); #ifdef REALLY_SLOW_IO native_io_delay(); @@ -275,51 +281,74 @@ static inline void slow_down_io(void) { #endif -#define __BUILDIO(bwl,bw,type) \ -static inline void out##bwl(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ -} \ -static inline unsigned type in##bwl(int port) { \ - return in##bwl##_local(port); \ +#define __BUILDIO(bwl, bw, type) \ +static inline void out##bwl(unsigned type value, int port) \ +{ \ + out##bwl##_local(value, port); \ +} \ + \ +static inline unsigned type in##bwl(int port) \ +{ \ + return in##bwl##_local(port); \ } -#define BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_local(unsigned type value, int port) { \ - __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ -} \ -static inline unsigned type in##bwl##_local(int port) { \ - unsigned type value; \ - __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ - return value; \ -} \ -static inline void out##bwl##_local_p(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_local_p(int port) { \ - unsigned type value = in##bwl##_local(port); \ - slow_down_io(); \ - return value; \ -} \ -__BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_p(unsigned type value, int port) { \ - out##bwl(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_p(int port) { \ - unsigned type value = in##bwl(port); \ - slow_down_io(); \ - return value; \ -} \ -static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ -} \ -static inline void ins##bwl(int port, void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ +#define BUILDIO(bwl, bw, type) \ +static inline void out##bwl##_local(unsigned type value, int port) \ +{ \ + asm volatile("out" #bwl " %" #bw "0, %w1" \ + : : "a"(value), "Nd"(port)); \ +} \ + \ +static inline unsigned type in##bwl##_local(int port) \ +{ \ + unsigned type value; \ + asm volatile("in" #bwl " %w1, %" #bw "0" \ + : "=a"(value) : "Nd"(port)); \ + return value; \ +} \ + \ +static inline void out##bwl##_local_p(unsigned type value, int port) \ +{ \ + out##bwl##_local(value, port); \ + slow_down_io(); \ +} \ + \ +static inline unsigned type in##bwl##_local_p(int port) \ +{ \ + unsigned type value = in##bwl##_local(port); \ + slow_down_io(); \ + return value; \ +} \ + \ +__BUILDIO(bwl, bw, type) \ + \ +static inline void out##bwl##_p(unsigned type value, int port) \ +{ \ + out##bwl(value, port); \ + slow_down_io(); \ +} \ + \ +static inline unsigned type in##bwl##_p(int port) \ +{ \ + unsigned type value = in##bwl(port); \ + slow_down_io(); \ + return value; \ +} \ + \ +static inline void outs##bwl(int port, const void *addr, unsigned long count) \ +{ \ + asm volatile("rep; outs" #bwl \ + : "+S"(addr), "+c"(count) : "d"(port)); \ +} \ + \ +static inline void ins##bwl(int port, void *addr, unsigned long 
count) \ +{ \ + asm volatile("rep; ins" #bwl \ + : "+D"(addr), "+c"(count) : "d"(port)); \ } -BUILDIO(b,b,char) -BUILDIO(w,w,short) -BUILDIO(l,,int) +BUILDIO(b, b, char) +BUILDIO(w, w, short) +BUILDIO(l, , int) #endif -- cgit v1.2.1 From 126f5f35a36b5734026db58263fc1d60e4ae466f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:23 -0700 Subject: include/asm-x86/io_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io_64.h | 110 ++++++++++++++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 42 deletions(-) diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index db0be2011a3c..c2f5eef47b88 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h @@ -58,60 +58,75 @@ static inline void slow_down_io(void) /* * Talk about misusing macros.. */ -#define __OUT1(s,x) \ +#define __OUT1(s, x) \ static inline void out##s(unsigned x value, unsigned short port) { -#define __OUT2(s,s1,s2) \ -__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" +#define __OUT2(s, s1, s2) \ +asm volatile ("out" #s " %" s1 "0,%" s2 "1" #ifndef REALLY_SLOW_IO #define REALLY_SLOW_IO #define UNSET_REALLY_SLOW_IO #endif -#define __OUT(s,s1,x) \ -__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ -__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ - slow_down_io(); } +#define __OUT(s, s1, x) \ + __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ + } \ + __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ + slow_down_io(); \ +} -#define __IN1(s) \ -static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; +#define __IN1(s) \ +static inline RETURN_TYPE in##s(unsigned short port) \ +{ \ + RETURN_TYPE _v; -#define __IN2(s,s1,s2) \ -__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" +#define __IN2(s, s1, s2) \ + asm volatile ("in" #s " %" s2 "1,%" s1 "0" -#define __IN(s,s1,i...) \ -__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ -__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ - slow_down_io(); return _v; } +#define __IN(s, s1, i...) 
\ + __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ + return _v; \ + } \ + __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ + slow_down_io(); \ + return _v; } #ifdef UNSET_REALLY_SLOW_IO #undef REALLY_SLOW_IO #endif -#define __INS(s) \ -static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ -{ __asm__ __volatile__ ("rep ; ins" #s \ -: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } +#define __INS(s) \ +static inline void ins##s(unsigned short port, void *addr, \ + unsigned long count) \ +{ \ + asm volatile ("rep ; ins" #s \ + : "=D" (addr), "=c" (count) \ + : "d" (port), "0" (addr), "1" (count)); \ +} -#define __OUTS(s) \ -static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ -{ __asm__ __volatile__ ("rep ; outs" #s \ -: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } +#define __OUTS(s) \ +static inline void outs##s(unsigned short port, const void *addr, \ + unsigned long count) \ +{ \ + asm volatile ("rep ; outs" #s \ + : "=S" (addr), "=c" (count) \ + : "d" (port), "0" (addr), "1" (count)); \ +} #define RETURN_TYPE unsigned char -__IN(b,"") +__IN(b, "") #undef RETURN_TYPE #define RETURN_TYPE unsigned short -__IN(w,"") +__IN(w, "") #undef RETURN_TYPE #define RETURN_TYPE unsigned int -__IN(l,"") +__IN(l, "") #undef RETURN_TYPE -__OUT(b,"b",char) -__OUT(w,"w",short) -__OUT(l,,int) +__OUT(b, "b", char) +__OUT(w, "w", short) +__OUT(l, , int) __INS(b) __INS(w) @@ -132,12 +147,12 @@ __OUTS(l) * Change virtual addresses to physical addresses and vv. * These are pretty trivial */ -static inline unsigned long virt_to_phys(volatile void * address) +static inline unsigned long virt_to_phys(volatile void *address) { return __pa(address); } -static inline void * phys_to_virt(unsigned long address) +static inline void *phys_to_virt(unsigned long address) { return __va(address); } @@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr) { return *(__force volatile __u8 *)addr; } + static inline __u16 __readw(const volatile void __iomem *addr) { return *(__force volatile __u16 *)addr; } + static __always_inline __u32 __readl(const volatile void __iomem *addr) { return *(__force volatile __u32 *)addr; } + static inline __u64 __readq(const volatile void __iomem *addr) { return *(__force volatile __u64 *)addr; } + #define readb(x) __readb(x) #define readw(x) __readw(x) #define readl(x) __readl(x) @@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr) { *(__force volatile __u32 *)addr = b; } + static inline void __writeq(__u64 b, volatile void __iomem *addr) { *(__force volatile __u64 *)addr = b; } + static inline void __writeb(__u8 b, volatile void __iomem *addr) { *(__force volatile __u8 *)addr = b; } + static inline void __writew(__u16 b, volatile void __iomem *addr) { *(__force volatile __u16 *)addr = b; } -#define writeq(val,addr) __writeq((val),(addr)) -#define writel(val,addr) __writel((val),(addr)) -#define writew(val,addr) __writew((val),(addr)) -#define writeb(val,addr) __writeb((val),(addr)) + +#define writeq(val, addr) __writeq((val), (addr)) +#define writel(val, addr) __writel((val), (addr)) +#define writew(val, addr) __writew((val), (addr)) +#define writeb(val, addr) __writeb((val), (addr)) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define __raw_writeq writeq -void __memcpy_fromio(void*,unsigned long,unsigned); -void __memcpy_toio(unsigned long,const void*,unsigned); 
+void __memcpy_fromio(void *, unsigned long, unsigned); +void __memcpy_toio(unsigned long, const void *, unsigned); -static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) +static inline void memcpy_fromio(void *to, const volatile void __iomem *from, + unsigned len) { - __memcpy_fromio(to,(unsigned long)from,len); + __memcpy_fromio(to, (unsigned long)from, len); } -static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) + +static inline void memcpy_toio(volatile void __iomem *to, const void *from, + unsigned len) { - __memcpy_toio((unsigned long)to,from,len); + __memcpy_toio((unsigned long)to, from, len); } void memset_io(volatile void __iomem *a, int b, size_t c); @@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c); */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) -#define flush_write_buffers() +#define flush_write_buffers() extern int iommu_bio_merge; #define BIO_VMERGE_BOUNDARY iommu_bio_merge -- cgit v1.2.1 From 88b4f6e98f79de31f49036da5bb1482872d512b5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:24 -0700 Subject: include/asm-x86/ioctls.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ioctls.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h index 93c894dc5154..c0c338bd4068 100644 --- a/include/asm-x86/ioctls.h +++ b/include/asm-x86/ioctls.h @@ -47,12 +47,13 @@ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T',0x2A, struct termios2) -#define TCSETS2 _IOW('T',0x2B, struct termios2) -#define TCSETSW2 _IOW('T',0x2C, struct termios2) -#define TCSETSF2 _IOW('T',0x2D, struct termios2) -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TCGETS2 _IOR('T', 0x2A, struct termios2) +#define TCSETS2 _IOW('T', 0x2B, struct termios2) +#define TCSETSW2 _IOW('T', 0x2C, struct termios2) +#define TCSETSF2 _IOW('T', 0x2D, struct termios2) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) + /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 -- cgit v1.2.1 From 1774a5bed33d85f250657f397b359b9837390a90 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:25 -0700 Subject: include/asm-x86/io.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 599cad3505c1..7b292d386713 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -7,5 +7,5 @@ #endif extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); -extern void __iomem * ioremap_wc(unsigned long offset, unsigned long size); +extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); -- cgit v1.2.1 From 5ca22aaad9ea078306537b2ef6fb788d931e8502 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:26 -0700 Subject: include/asm-x86/ipcbuf.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ipcbuf.h | 3 +-- 1 file 
changed, 1 insertion(+), 2 deletions(-) diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h index 2adf8b39a40b..ee678fd51594 100644 --- a/include/asm-x86/ipcbuf.h +++ b/include/asm-x86/ipcbuf.h @@ -11,8 +11,7 @@ * - 2 miscellaneous 32-bit values */ -struct ipc64_perm -{ +struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; -- cgit v1.2.1 From 061b3d90bc3490af41c6756189b59713cd9ecaee Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:27 -0700 Subject: include/asm-x86/ipi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ipi.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index 6d011bd6067d..ecc80f341f37 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h @@ -27,7 +27,8 @@ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. */ -static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) +static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, + unsigned int dest) { unsigned int icr = shortcut | dest; @@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns return icr; } -static inline int __prepare_ICR2 (unsigned int mask) +static inline int __prepare_ICR2(unsigned int mask) { return SET_APIC_DEST_FIELD(mask); } -static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, + unsigned int dest) { /* * Subtle. In the case of the 'never do double writes' workaround @@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign * This is used to send an IPI with no shorthand notation (the destination is * specified in bits 56 to 63 of the ICR). */ -static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) +static inline void __send_IPI_dest_field(unsigned int mask, int vector, + unsigned int dest) { unsigned long cfg; -- cgit v1.2.1 From f4964d2ac51fcc8cd56975139422cdbb3a1e66dc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:28 -0700 Subject: include/asm-x86/irq_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irq_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h index aca9c96e8e6b..0b79f3185243 100644 --- a/include/asm-x86/irq_32.h +++ b/include/asm-x86/irq_32.h @@ -15,7 +15,7 @@ #include "irq_vectors.h" #include -static __inline__ int irq_canonicalize(int irq) +static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 
9 : irq); } -- cgit v1.2.1 From 3ff3522497d7ad383df9770e812fbaf75d19b214 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:29 -0700 Subject: include/asm-x86/irq_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irq_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h index 5006c6e75656..083d35a62c94 100644 --- a/include/asm-x86/irq_64.h +++ b/include/asm-x86/irq_64.h @@ -31,10 +31,10 @@ #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ -#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS)) +#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) #define NR_IRQ_VECTORS NR_IRQS -static __inline__ int irq_canonicalize(int irq) +static inline int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); } -- cgit v1.2.1 From cf7f7191cf20011e47243b594e433275a6db811b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:30 -0700 Subject: include/asm-x86/irqflags.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/irqflags.h | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 92021c1ffa3a..c242527f970e 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void) { unsigned long flags; - __asm__ __volatile__( - "# __raw_save_flags\n\t" - "pushf ; pop %0" - : "=g" (flags) - : /* no input */ - : "memory" - ); + asm volatile("# __raw_save_flags\n\t" + "pushf ; pop %0" + : "=g" (flags) + : /* no input */ + : "memory"); return flags; } static inline void native_restore_fl(unsigned long flags) { - __asm__ __volatile__( - "push %0 ; popf" - : /* no output */ - :"g" (flags) - :"memory", "cc" - ); + asm volatile("push %0 ; popf" + : /* no output */ + :"g" (flags) + :"memory", "cc"); } static inline void native_irq_disable(void) @@ -131,11 +127,11 @@ static inline unsigned long __raw_local_irq_save(void) #endif /* CONFIG_PARAVIRT */ #ifndef __ASSEMBLY__ -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) +#define raw_local_save_flags(flags) \ + do { (flags) = __raw_local_save_flags(); } while (0) -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) +#define raw_local_irq_save(flags) \ + do { (flags) = __raw_local_irq_save(); } while (0) static inline int raw_irqs_disabled_flags(unsigned long flags) { -- cgit v1.2.1 From f461f1372cef1853534df2115f9ff5b7fbfc6958 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:31 -0700 Subject: include/asm-x86/kdebug.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kdebug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 99dcbafa1511..0c4175390dab 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h @@ -23,12 +23,12 @@ enum die_val { }; extern void printk_address(unsigned long address, int reliable); -extern void die(const char *,struct pt_regs *,long); +extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_registers(struct pt_regs *regs); extern void __show_registers(struct pt_regs *, int all); extern void 
show_trace(struct task_struct *t, struct pt_regs *regs, - unsigned long *sp, unsigned long bp); + unsigned long *sp, unsigned long bp); extern void __show_regs(struct pt_regs *regs); extern void show_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); -- cgit v1.2.1 From b69a3f9dc0bbdbf9278ac5bc8d4b6347c11a701b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:32 -0700 Subject: include/asm-x86/kexec.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kexec.h | 71 ++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index c90d3c77afc2..8f855a15f64d 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, { #ifdef CONFIG_X86_32 newregs->sp = (unsigned long)&(oldregs->sp); - __asm__ __volatile__( - "xorl %%eax, %%eax\n\t" - "movw %%ss, %%ax\n\t" - :"=a"(newregs->ss)); + asm volatile("xorl %%eax, %%eax\n\t" + "movw %%ss, %%ax\n\t" + :"=a"(newregs->ss)); #endif } @@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs, crash_fixup_ss_esp(newregs, oldregs); } else { #ifdef CONFIG_X86_32 - __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); - __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); - __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); - __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); - __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); - __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); - __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); - __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); - __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); - __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); - __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); + asm volatile("movl %%ebx,%0" : "=m"(newregs->bx)); + asm volatile("movl %%ecx,%0" : "=m"(newregs->cx)); + asm volatile("movl %%edx,%0" : "=m"(newregs->dx)); + asm volatile("movl %%esi,%0" : "=m"(newregs->si)); + asm volatile("movl %%edi,%0" : "=m"(newregs->di)); + asm volatile("movl %%ebp,%0" : "=m"(newregs->bp)); + asm volatile("movl %%eax,%0" : "=m"(newregs->ax)); + asm volatile("movl %%esp,%0" : "=m"(newregs->sp)); + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); + asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds)); + asm volatile("movl %%es, %%eax;" :"=a"(newregs->es)); + asm volatile("pushfl; popl %0" :"=m"(newregs->flags)); #else - __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); - __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); - __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); - __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); - __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); - __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); - __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); - __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); - __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); - __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); - __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); - __asm__ __volatile__("movq %%r11,%0" : 
"=m"(newregs->r11)); - __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); - __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); - __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); - __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); - __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); + asm volatile("movq %%rbx,%0" : "=m"(newregs->bx)); + asm volatile("movq %%rcx,%0" : "=m"(newregs->cx)); + asm volatile("movq %%rdx,%0" : "=m"(newregs->dx)); + asm volatile("movq %%rsi,%0" : "=m"(newregs->si)); + asm volatile("movq %%rdi,%0" : "=m"(newregs->di)); + asm volatile("movq %%rbp,%0" : "=m"(newregs->bp)); + asm volatile("movq %%rax,%0" : "=m"(newregs->ax)); + asm volatile("movq %%rsp,%0" : "=m"(newregs->sp)); + asm volatile("movq %%r8,%0" : "=m"(newregs->r8)); + asm volatile("movq %%r9,%0" : "=m"(newregs->r9)); + asm volatile("movq %%r10,%0" : "=m"(newregs->r10)); + asm volatile("movq %%r11,%0" : "=m"(newregs->r11)); + asm volatile("movq %%r12,%0" : "=m"(newregs->r12)); + asm volatile("movq %%r13,%0" : "=m"(newregs->r13)); + asm volatile("movq %%r14,%0" : "=m"(newregs->r14)); + asm volatile("movq %%r15,%0" : "=m"(newregs->r15)); + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); + asm volatile("pushfq; popq %0" :"=m"(newregs->flags)); #endif newregs->ip = (unsigned long)current_text_addr(); } -- cgit v1.2.1 From 864dfa13bed444d2c6d76d584e81713405d60082 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:33 -0700 Subject: include/asm-x86/kprobes.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kprobes.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 61ad7b5d142e..54980b0b3892 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h @@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t; #define RELATIVEJUMP_INSTRUCTION 0xe9 #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ - (((unsigned long)current_thread_info()) + THREAD_SIZE \ - - (unsigned long)(ADDR))) \ - ? (MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + THREAD_SIZE \ - - (unsigned long)(ADDR))) +#define MIN_STACK_SIZE(ADDR) \ + (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ + THREAD_SIZE - (unsigned long)(ADDR))) \ + ? 
(MAX_STACK_SIZE) \ + : (((unsigned long)current_thread_info()) + \ + THREAD_SIZE - (unsigned long)(ADDR))) #define flush_insn_slot(p) do { } while (0) -- cgit v1.2.1 From 7d76b4d3767324fa44ac73032d6eeb428e46f004 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:34 -0700 Subject: include/asm-x86/kvm_host.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kvm_host.h | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 4702b04b979a..68ee390b2844 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -22,15 +22,16 @@ #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) -#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) +#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ + 0xFFFFFF0000000000ULL) -#define KVM_GUEST_CR0_MASK \ +#define KVM_GUEST_CR0_MASK \ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ | X86_CR0_NW | X86_CR0_CD) -#define KVM_VM_CR0_ALWAYS_ON \ +#define KVM_VM_CR0_ALWAYS_ON \ (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ | X86_CR0_MP) -#define KVM_GUEST_CR4_MASK \ +#define KVM_GUEST_CR4_MASK \ (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) @@ -133,12 +134,12 @@ struct kvm_pte_chain { union kvm_mmu_page_role { unsigned word; struct { - unsigned glevels : 4; - unsigned level : 4; - unsigned quadrant : 2; - unsigned pad_for_nice_hex_output : 6; - unsigned metaphysical : 1; - unsigned access : 3; + unsigned glevels:4; + unsigned level:4; + unsigned quadrant:2; + unsigned pad_for_nice_hex_output:6; + unsigned metaphysical:1; + unsigned access:3; }; }; @@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) #define TSS_BASE_SIZE 0x68 #define TSS_IOPB_SIZE (65536 / 8) #define TSS_REDIRECTION_SIZE (256 / 8) -#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) +#define RMODE_TSS_SIZE \ + (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) #endif -- cgit v1.2.1 From 0c7825e64d377409deaaf73cd6311da9df310db3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:35 -0700 Subject: include/asm-x86/kvm_x86_emulate.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/kvm_x86_emulate.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index 7db91b9bdcd4..d6337f941c98 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h @@ -68,10 +68,10 @@ struct x86_emulate_ops { * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. */ - int (*read_emulated) (unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*read_emulated)(unsigned long addr, + void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); /* * write_emulated: Read bytes from emulated/special memory area. @@ -80,10 +80,10 @@ struct x86_emulate_ops { * required). * @bytes: [IN ] Number of bytes to write to memory. 
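 *
 * ("Read bytes" in the description above is evidently a copy/paste
 * slip for "Write bytes"; the formatting-only hunk below does not
 * touch it.)
 *
 * Hypothetical caller sketch -- ops, addr and vcpu are stand-ins
 * invented for the example; only the callback signature comes from
 * this header:
 *
 *	u32 val = 0x12345678;
 *	if (ops->write_emulated(addr, &val, sizeof(val), vcpu))
 *		return X86EMUL_UNHANDLEABLE;
 *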
*/ - int (*write_emulated) (unsigned long addr, - const void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*write_emulated)(unsigned long addr, + const void *val, + unsigned int bytes, + struct kvm_vcpu *vcpu); /* * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an @@ -93,11 +93,11 @@ struct x86_emulate_ops { * @new: [IN ] Value to write to @addr. * @bytes: [IN ] Number of bytes to access using CMPXCHG. */ - int (*cmpxchg_emulated) (unsigned long addr, - const void *old, - const void *new, - unsigned int bytes, - struct kvm_vcpu *vcpu); + int (*cmpxchg_emulated)(unsigned long addr, + const void *old, + const void *new, + unsigned int bytes, + struct kvm_vcpu *vcpu); }; @@ -143,7 +143,7 @@ struct x86_emulate_ctxt { /* Register state before/after emulation. */ struct kvm_vcpu *vcpu; - /* Linear faulting address (if emulating a page-faulting instruction). */ + /* Linear faulting address (if emulating a page-faulting instruction) */ unsigned long eflags; /* Emulated execution mode, represented by an X86EMUL_MODE value. */ -- cgit v1.2.1 From fd1ea0c25ae00e2ac55881af55c3206664dd59a8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:36 -0700 Subject: include/asm-x86/lguest_hcall.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/lguest_hcall.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f239e7069cab..a3241f28e34a 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h @@ -46,7 +46,7 @@ hcall(unsigned long call, { /* "int" is the Intel instruction to trigger a trap. */ asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) - /* The call in %eax (aka "a") might be overwritten */ + /* The call in %eax (aka "a") might be overwritten */ : "=a"(call) /* The arguments are in %eax, %edx, %ebx & %ecx */ : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) @@ -62,8 +62,7 @@ hcall(unsigned long call, #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) #define LHCALL_RING_SIZE 64 -struct hcall_args -{ +struct hcall_args { /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ unsigned long arg0, arg2, arg3, arg1; }; -- cgit v1.2.1 From fb444c7b25420d57ce5e31cab486f734705bd278 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:37 -0700 Subject: include/asm-x86/lguest.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/lguest.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index 9b17571e9bc3..be4a7247fa2b 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h @@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[]; extern void lguest_iret(void); extern void lguest_init(void); -struct lguest_regs -{ +struct lguest_regs { /* Manually saved part. */ unsigned long eax, ebx, ecx, edx; unsigned long esi, edi, ebp; @@ -51,8 +50,7 @@ struct lguest_regs }; /* This is a guest-specific page (mapped ro) into the guest. */ -struct lguest_ro_state -{ +struct lguest_ro_state { /* Host information we need to restore when we switch back. */ u32 host_cr3; struct desc_ptr host_idt_desc; @@ -67,8 +65,7 @@ struct lguest_ro_state struct desc_struct guest_gdt[GDT_ENTRIES]; }; -struct lg_cpu_arch -{ +struct lg_cpu_arch { /* The GDT entries copied into lguest_ro_state when running. 
*/ struct desc_struct gdt[GDT_ENTRIES]; @@ -85,7 +82,7 @@ static inline void lguest_set_ts(void) cr0 = read_cr0(); if (!(cr0 & 8)) - write_cr0(cr0|8); + write_cr0(cr0 | 8); } /* Full 4G segment descriptors, suitable for CS and DS. */ -- cgit v1.2.1 From 69cde6512c3a0227878869f9ba8a02cdc72fc253 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:39 -0700 Subject: include/asm-x86/local.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/local.h | 105 +++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 55 deletions(-) diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index f852c62b3319..330a72496abd 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h @@ -18,32 +18,28 @@ typedef struct { static inline void local_inc(local_t *l) { - __asm__ __volatile__( - _ASM_INC "%0" - :"+m" (l->a.counter)); + asm volatile(_ASM_INC "%0" + : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - __asm__ __volatile__( - _ASM_DEC "%0" - :"+m" (l->a.counter)); + asm volatile(_ASM_DEC "%0" + : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - __asm__ __volatile__( - _ASM_ADD "%1,%0" - :"+m" (l->a.counter) - :"ir" (i)); + asm volatile(_ASM_ADD "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - __asm__ __volatile__( - _ASM_SUB "%1,%0" - :"+m" (l->a.counter) - :"ir" (i)); + asm volatile(_ASM_SUB "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); } /** @@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_SUB "%2,%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(_ASM_SUB "%2,%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_DEC "%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); + asm volatile(_ASM_DEC "%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_INC "%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); + asm volatile(_ASM_INC "%0; sete %1" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); return c != 0; } @@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l) { unsigned char c; - __asm__ __volatile__( - _ASM_ADD "%2,%0; sets %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); + asm volatile(_ASM_ADD "%2,%0; sets %1" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); return c; } @@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l) #endif /* Modern 486+ processor */ __i = i; - __asm__ __volatile__( - _ASM_XADD "%0, %1;" - :"+r" (i), "+m" (l->a.counter) - : : "memory"); + asm volatile(_ASM_XADD "%0, %1;" + : "+r" (i), "+m" (l->a.counter) + : : "memory"); return i + __i; #ifdef CONFIG_M386 @@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l) #define local_add_unless(l, a, u) \ ({ \ long c, old; \ - c = local_read(l); \ + c = local_read((l)); \ for (;;) { \ if (unlikely(c == (u))) \ break; \ - old = local_cmpxchg((l), c, c + (a)); \ + old = local_cmpxchg((l), c, c + (a)); \ if (likely(old == c)) \ break; \ c = old; \ @@ -214,26 +205,30 @@ static inline long 
local_sub_return(long i, local_t *l) /* Need to disable preemption for the cpu local counters otherwise we could still access a variable of a previous CPU in a non atomic way. */ -#define cpu_local_wrap_v(l) \ - ({ local_t res__; \ - preempt_disable(); \ - res__ = (l); \ - preempt_enable(); \ - res__; }) +#define cpu_local_wrap_v(l) \ +({ \ + local_t res__; \ + preempt_disable(); \ + res__ = (l); \ + preempt_enable(); \ + res__; \ +}) #define cpu_local_wrap(l) \ - ({ preempt_disable(); \ - l; \ - preempt_enable(); }) \ - -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) - -#define __cpu_local_inc(l) cpu_local_inc(l) -#define __cpu_local_dec(l) cpu_local_dec(l) +({ \ + preempt_disable(); \ + (l); \ + preempt_enable(); \ +}) \ + +#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) +#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) +#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) +#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) +#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) +#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) + +#define __cpu_local_inc(l) cpu_local_inc((l)) +#define __cpu_local_dec(l) cpu_local_dec((l)) #define __cpu_local_add(i, l) cpu_local_add((i), (l)) #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) -- cgit v1.2.1 From 933a44155caeb4ff5b58fcf755e3381ae37e72d4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:40 -0700 Subject: include/asm-x86/mc146818rtc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mc146818rtc.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index cdd9f965835a..daf1ccde77af 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h @@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock; static inline void lock_cmos(unsigned char reg) { unsigned long new; - new = ((smp_processor_id()+1) << 8) | reg; + new = ((smp_processor_id() + 1) << 8) | reg; for (;;) { if (cmos_lock) { cpu_relax(); @@ -57,22 +57,26 @@ static inline void unlock_cmos(void) { cmos_lock = 0; } + static inline int do_i_have_lock_cmos(void) { - return (cmos_lock >> 8) == (smp_processor_id()+1); + return (cmos_lock >> 8) == (smp_processor_id() + 1); } + static inline unsigned char current_lock_cmos_reg(void) { return cmos_lock & 0xff; } -#define lock_cmos_prefix(reg) \ + +#define lock_cmos_prefix(reg) \ do { \ unsigned long cmos_flags; \ local_irq_save(cmos_flags); \ lock_cmos(reg) -#define lock_cmos_suffix(reg) \ - unlock_cmos(); \ - local_irq_restore(cmos_flags); \ + +#define lock_cmos_suffix(reg) \ + unlock_cmos(); \ + local_irq_restore(cmos_flags); \ } while (0) #else #define lock_cmos_prefix(reg) do {} while (0) -- cgit v1.2.1 From 77d511ed744f7febcd2a3444f63e6c54ac32d0c3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:41 -0700 Subject: include/asm-x86/mca_dma.h: checkpatch cleanups - 
formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mca_dma.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h index fbb1f3b71279..c3dca6edc6b1 100644 --- a/include/asm-x86/mca_dma.h +++ b/include/asm-x86/mca_dma.h @@ -12,18 +12,18 @@ * count by 2 when using 16-bit dma; that is not handled by these functions. * * Ramen Noodles are yummy. - * - * 1998 Tymm Twillman + * + * 1998 Tymm Twillman */ /* - * Registers that are used by the DMA controller; FN is the function register + * Registers that are used by the DMA controller; FN is the function register * (tell the controller what to do) and EXE is the execution register (how * to do it) */ #define MCA_DMA_REG_FN 0x18 -#define MCA_DMA_REG_EXE 0x1A +#define MCA_DMA_REG_EXE 0x1A /* * Functions that the DMA controller can do @@ -43,9 +43,9 @@ /* * Modes (used by setting MCA_DMA_FN_MODE in the function register) - * + * * Note that the MODE_READ is read from memory (write to device), and - * MODE_WRITE is vice-versa. + * MODE_WRITE is vice-versa. */ #define MCA_DMA_MODE_XFER 0x04 /* read by default */ @@ -63,7 +63,7 @@ * IRQ context. */ -static __inline__ void mca_enable_dma(unsigned int dmanr) +static inline void mca_enable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); } @@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr) * IRQ context. */ -static __inline__ void mca_disable_dma(unsigned int dmanr) +static inline void mca_disable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); } @@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr) * @a: 24bit bus address * * Load the address register in the DMA controller. This has a 24bit - * limitation (16Mb). + * limitation (16Mb). */ -static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) +static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a) { outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); outb(a & 0xff, MCA_DMA_REG_EXE); @@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) * limitation (16Mb). The return is a bus address. */ -static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) +static inline unsigned int mca_get_dma_addr(unsigned int dmanr) { unsigned int addr; outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); addr = inb(MCA_DMA_REG_EXE); addr |= inb(MCA_DMA_REG_EXE) << 8; - addr |= inb(MCA_DMA_REG_EXE) << 16; + addr |= inb(MCA_DMA_REG_EXE) << 16; return addr; } @@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) * Setting a count of zero will not do what you expect. */ -static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) +static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count) { count--; /* transfers one more than count -- correct for this */ @@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) * on this DMA channel. */ -static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) +static inline unsigned int mca_get_dma_residue(unsigned int dmanr) { unsigned short count; @@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) * with an I/O port target. 
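 *
 * Illustrative sequence (the channel variable, port and count are
 * invented; only the helpers and the mode flag come from this
 * header, and the set-then-enable ordering is the usual DMA
 * programming convention, not something this patch specifies):
 *
 *	mca_set_dma_io(dmanr, 0x300);	- channel targets port 0x300
 *	mca_set_dma_count(dmanr, 512);	- 512 transfer units
 *	mca_set_dma_mode(dmanr, MCA_DMA_MODE_XFER);
 *	mca_enable_dma(dmanr);
 *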
*/ -static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) +static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) { /* * DMA from a port address -- set the io address */ - + outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); outb(io_addr & 0xff, MCA_DMA_REG_EXE); outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); @@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) * %MCA_DMA_MODE_16 to do 16bit transfers. */ -static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) +static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) { outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); outb(mode, MCA_DMA_REG_EXE); -- cgit v1.2.1 From 55464da94a845e057ffb94a9fc7be1aa86ffcd89 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:42 -0700 Subject: include/asm-x86/mmu_context_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu_context_32.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 8198d1cca1f3..9756ae0f1dd3 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h @@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled + /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); @@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev, #define deactivate_mm(tsk, mm) \ asm("movl %0,%%gs": :"r" (0)); -#define activate_mm(prev, next) \ - do { \ - paravirt_activate_mm(prev, next); \ - switch_mm((prev),(next),NULL); \ - } while(0); +#define activate_mm(prev, next) \ +do { \ + paravirt_activate_mm((prev), (next)); \ + switch_mm((prev), (next), NULL); \ +} while (0); #endif -- cgit v1.2.1 From c4fe760efde84e52168a81bf125f25ba2f118b51 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:43 -0700 Subject: include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu_context_64.h | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index ad6dc821ef9e..ca44c71e7fb3 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h @@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP - if (read_pda(mmu_state) == TLBSTATE_OK) + if (read_pda(mmu_state) == TLBSTATE_OK) write_pda(mmu_state, TLBSTATE_LAZY); #endif } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); @@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, cpu_set(cpu, next->cpu_vm_mask); load_cr3(next->pgd); - if (unlikely(next->context.ldt != prev->context.ldt)) + if (unlikely(next->context.ldt != prev->context.ldt)) load_LDT_nolock(&next->context); } #ifdef CONFIG_SMP @@ -48,7 +48,7 @@ static inline void switch_mm(struct 
mm_struct *prev, struct mm_struct *next, if (read_pda(active_mm) != next) BUG(); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled + /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. */ @@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, #endif } -#define deactivate_mm(tsk,mm) do { \ - load_gs_index(0); \ - asm volatile("movl %0,%%fs"::"r"(0)); \ -} while(0) +#define deactivate_mm(tsk, mm) \ +do { \ + load_gs_index(0); \ + asm volatile("movl %0,%%fs"::"r"(0)); \ +} while (0) -#define activate_mm(prev, next) \ - switch_mm((prev),(next),NULL) +#define activate_mm(prev, next) \ + switch_mm((prev), (next), NULL) #endif -- cgit v1.2.1 From 710d0e9cce8bfcfa821c5929c1d4dbf30a660ce6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:44 -0700 Subject: include/asm-x86/mmu.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index efa962c38897..00e88679e11f 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h @@ -10,10 +10,10 @@ * * cpu_vm_mask is used to optimize ldt flushing. */ -typedef struct { +typedef struct { void *ldt; #ifdef CONFIG_X86_64 - rwlock_t ldtlock; + rwlock_t ldtlock; #endif int size; struct mutex lock; -- cgit v1.2.1 From f2334076890bbe3cddca2c053684653c614e9b48 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:45 -0700 Subject: include/asm-x86/mmx.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h index 46b71da99869..940881218ff8 100644 --- a/include/asm-x86/mmx.h +++ b/include/asm-x86/mmx.h @@ -6,7 +6,7 @@ */ #include - + extern void *_mmx_memcpy(void *to, const void *from, size_t size); extern void mmx_clear_page(void *page); extern void mmx_copy_page(void *to, void *from); -- cgit v1.2.1 From 7491d33d9ab042f3fdb9ec00054f69737dcd180f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:46 -0700 Subject: include/asm-x86/mmzone_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index b9f5be2f603b..cb2cad0b65a7 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -18,7 +18,7 @@ extern struct pglist_data *node_data[]; #include #endif -extern int get_memcfg_numa_flat(void ); +extern int get_memcfg_numa_flat(void); /* * This allows any one NUMA architecture to be compiled * for, and still fall back to the flat function if it -- cgit v1.2.1 From 60e9bfd1bd5be433e56b050e025d3d5c91c967a8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:47 -0700 Subject: include/asm-x86/mmzone_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_64.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index ebaf9663aa8a..594bd0dc1d08 100644 --- a/include/asm-x86/mmzone_64.h +++ 
b/include/asm-x86/mmzone_64.h @@ -7,7 +7,7 @@ #ifdef CONFIG_NUMA -#define VIRTUAL_BUG_ON(x) +#define VIRTUAL_BUG_ON(x) #include @@ -16,7 +16,7 @@ struct memnode { int shift; unsigned int mapsize; s16 *map; - s16 embedded_map[64-8]; + s16 embedded_map[64 - 8]; } ____cacheline_aligned; /* total size = 128 bytes */ extern struct memnode memnode; #define memnode_shift memnode.shift @@ -25,27 +25,27 @@ extern struct memnode memnode; extern struct pglist_data *node_data[]; -static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) -{ - unsigned nid; +static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) +{ + unsigned nid; VIRTUAL_BUG_ON(!memnodemap); VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); - nid = memnodemap[addr >> memnode_shift]; - VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); - return nid; -} + nid = memnodemap[addr >> memnode_shift]; + VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); + return nid; +} #define NODE_DATA(nid) (node_data[nid]) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ +#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages) extern int early_pfn_to_nid(unsigned long pfn); #ifdef CONFIG_NUMA_EMU -#define FAKE_NODE_MIN_SIZE (64*1024*1024) -#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL)) +#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) +#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) #endif #endif -- cgit v1.2.1 From 8f08403e61a86c3179642239184aff3a5f636be1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:48 -0700 Subject: include/asm-x86/mpspec_def.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec_def.h | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 1f35691b4f7c..dc6ef85e3624 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h @@ -11,7 +11,7 @@ * information is. 
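 *
 * As a worked example of the magic below: '_' is 0x5f, 'P' is 0x50
 * and 'M' is 0x4d, so
 *
 *	('_'<<24) | ('P'<<16) | ('M'<<8) | '_' == 0x5f504d5f
 *
 * i.e. the dword whose little-endian byte order spells "_MP_" in
 * mpf_signature[].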
*/ -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') +#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_') #ifdef CONFIG_X86_32 # define MAX_MPC_ENTRY 1024 @@ -23,8 +23,7 @@ # define MAX_APICS 255 #endif -struct intel_mp_floating -{ +struct intel_mp_floating { char mpf_signature[4]; /* "_MP_" */ unsigned int mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ @@ -39,14 +38,13 @@ struct intel_mp_floating #define MPC_SIGNATURE "PCMP" -struct mp_config_table -{ +struct mp_config_table { char mpc_signature[4]; unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; + char mpc_spec; /* 0x01 */ + char mpc_checksum; + char mpc_oem[8]; + char mpc_productid[12]; unsigned int mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; @@ -71,8 +69,7 @@ struct mp_config_table #define CPU_MODEL_MASK 0x00F0 #define CPU_FAMILY_MASK 0x0F00 -struct mpc_config_processor -{ +struct mpc_config_processor { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ @@ -82,8 +79,7 @@ struct mpc_config_processor unsigned int mpc_reserved[2]; }; -struct mpc_config_bus -{ +struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; @@ -111,8 +107,7 @@ struct mpc_config_bus #define MPC_APIC_USABLE 0x01 -struct mpc_config_ioapic -{ +struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; @@ -120,8 +115,7 @@ struct mpc_config_ioapic unsigned int mpc_apicaddr; }; -struct mpc_config_intsrc -{ +struct mpc_config_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; @@ -144,8 +138,7 @@ enum mp_irq_source_types { #define MP_APIC_ALL 0xFF -struct mpc_config_lintsrc -{ +struct mpc_config_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; @@ -157,8 +150,7 @@ struct mpc_config_lintsrc #define MPC_OEM_SIGNATURE "_OEM" -struct mp_config_oemtable -{ +struct mp_config_oemtable { char oem_signature[4]; unsigned short oem_length; /* Size of table */ char oem_rev; /* 0x01 */ @@ -185,4 +177,3 @@ enum mp_bustype { MP_BUS_MCA, }; #endif - -- cgit v1.2.1 From 30971e17ee484f72e081826a0bf3e489ef3b4c30 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:49 -0700 Subject: include/asm-x86/mpspec.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 99da0a59b436..eccbc581ec84 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -55,8 +55,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) -struct physid_mask -{ +struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; }; @@ -65,34 +64,34 @@ typedef struct physid_mask physid_mask_t; #define physid_set(physid, map) set_bit(physid, (map).mask) #define physid_clear(physid, map) clear_bit(physid, (map).mask) #define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) \ +#define physid_test_and_set(physid, map) \ test_and_set_bit(physid, (map).mask) -#define physids_and(dst, src1, src2) \ 
+#define physids_and(dst, src1, src2) \ bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) \ +#define physids_or(dst, src1, src2) \ bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) \ +#define physids_clear(map) \ bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) \ +#define physids_complement(dst, src) \ bitmap_complement((dst).mask, (src).mask, MAX_APICS) -#define physids_empty(map) \ +#define physids_empty(map) \ bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) \ +#define physids_equal(map1, map2) \ bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) \ +#define physids_weight(map) \ bitmap_weight((map).mask, MAX_APICS) -#define physids_shift_right(d, s, n) \ +#define physids_shift_right(d, s, n) \ bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) -#define physids_shift_left(d, s, n) \ +#define physids_shift_left(d, s, n) \ bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) #define physids_coerce(map) ((map).mask[0]) -- cgit v1.2.1 From 934902b474bdb235a273985ad4c61eb136afe11d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:50 -0700 Subject: include/asm-x86/msidef.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/msidef.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 5b8acddb70fb..296f29ce426d 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h @@ -11,7 +11,8 @@ #define MSI_DATA_VECTOR_SHIFT 0 #define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) +#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ + MSI_DATA_VECTOR_MASK) #define MSI_DATA_DELIVERY_MODE_SHIFT 8 #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) @@ -37,11 +38,14 @@ #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) #define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ +#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) + /* dedicated cpu */ +#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) + /* lowest priority */ #define MSI_ADDR_DEST_ID_SHIFT 12 #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 -#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) +#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ + MSI_ADDR_DEST_ID_MASK) #endif /* ASM_MSIDEF_H */ -- cgit v1.2.1 From abb0ade013507c93a9a0b263bbb7b0327d7c38db Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:51 -0700 Subject: include/asm-x86/msr.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/msr.h | 81 ++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 3ca29ebebbb1..2c698a2e81f9 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h @@ -16,8 +16,8 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) { unsigned long low, high; - asm volatile (".byte 0x0f,0x01,0xf9" - : "=a" (low), "=d" (high), "=c" (*aux)); + asm volatile(".byte 
0x0f,0x01,0xf9" + : "=a" (low), "=d" (high), "=c" (*aux)); return low | ((u64)high >> 32); } @@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) */ #ifdef CONFIG_X86_64 #define DECLARE_ARGS(val, low, high) unsigned low, high -#define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) +#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) #else @@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %3,%0 ; jmp 1b\n\t" ".previous\n\t" - _ASM_EXTABLE(2b,3b) + _ASM_EXTABLE(2b, 3b) : "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr), "i" (-EFAULT)); return EAX_EDX_VAL(val, low, high); @@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr, ".section .fixup,\"ax\"\n\t" "3: mov %4,%0 ; jmp 1b\n\t" ".previous\n\t" - _ASM_EXTABLE(2b,3b) + _ASM_EXTABLE(2b, 3b) : "=a" (err) : "c" (msr), "0" (low), "d" (high), - "i" (-EFAULT)); + "i" (-EFAULT)); return err; } @@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter) * pointer indirection), this allows gcc to optimize better */ -#define rdmsr(msr,val1,val2) \ - do { \ - u64 __val = native_read_msr(msr); \ - (val1) = (u32)__val; \ - (val2) = (u32)(__val >> 32); \ - } while(0) +#define rdmsr(msr, val1, val2) \ +do { \ + u64 __val = native_read_msr((msr)); \ + (val1) = (u32)__val; \ + (val2) = (u32)(__val >> 32); \ +} while (0) static inline void wrmsr(unsigned msr, unsigned low, unsigned high) { native_write_msr(msr, low, high); } -#define rdmsrl(msr,val) \ - ((val) = native_read_msr(msr)) +#define rdmsrl(msr, val) \ + ((val) = native_read_msr((msr))) #define wrmsrl(msr, val) \ - native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) + native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) /* wrmsr with exception handling */ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) @@ -141,14 +141,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) } /* rdmsr with exception handling */ -#define rdmsr_safe(msr,p1,p2) \ - ({ \ - int __err; \ - u64 __val = native_read_msr_safe(msr, &__err); \ - (*p1) = (u32)__val; \ - (*p2) = (u32)(__val >> 32); \ - __err; \ - }) +#define rdmsr_safe(msr, p1, p2) \ +({ \ + int __err; \ + u64 __val = native_read_msr_safe((msr), &__err); \ + (*p1) = (u32)__val; \ + (*p2) = (u32)(__val >> 32); \ + __err; \ +}) #define rdtscl(low) \ ((low) = (u32)native_read_tsc()) @@ -156,35 +156,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) #define rdtscll(val) \ ((val) = native_read_tsc()) -#define rdpmc(counter,low,high) \ - do { \ - u64 _l = native_read_pmc(counter); \ - (low) = (u32)_l; \ - (high) = (u32)(_l >> 32); \ - } while(0) +#define rdpmc(counter, low, high) \ +do { \ + u64 _l = native_read_pmc((counter)); \ + (low) = (u32)_l; \ + (high) = (u32)(_l >> 32); \ +} while (0) -#define rdtscp(low, high, aux) \ - do { \ - unsigned long long _val = native_read_tscp(&(aux)); \ - (low) = (u32)_val; \ - (high) = (u32)(_val >> 32); \ - } while (0) +#define rdtscp(low, high, aux) \ +do { \ + unsigned long long _val = native_read_tscp(&(aux)); \ + (low) = (u32)_val; \ + (high) = (u32)(_val >> 32); \ +} while (0) #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) #endif /* !CONFIG_PARAVIRT */ -#define checking_wrmsrl(msr,val) 
wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) +#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ + (u32)((val) >> 32)) -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) +#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2)) -#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) +#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) #ifdef CONFIG_SMP void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); + int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); #else /* CONFIG_SMP */ static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) @@ -195,7 +197,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { wrmsr(msr_no, l, h); } -static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) +static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, + u32 *l, u32 *h) { return rdmsr_safe(msr_no, l, h); } -- cgit v1.2.1 From 9969b4405469e12070c560ff27dbe587470fc945 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:52 -0700 Subject: include/asm-x86/mtrr.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mtrr.h | 64 +++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index d3d26625aeaf..a69a01a51729 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h @@ -28,8 +28,7 @@ #define MTRR_IOCTL_BASE 'M' -struct mtrr_sentry -{ +struct mtrr_sentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ @@ -41,8 +40,7 @@ struct mtrr_sentry will break. 
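 *
 * (Layout note, an inference from the two definitions below: 'base'
 * is an unsigned long, which is 8 bytes on x86-64, so leading with
 * it keeps that field naturally aligned instead of forcing hidden
 * padding after a leading 4-byte 'regnum' -- hence the differing
 * field order the warning above refers to.)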
*/ #ifdef __i386__ -struct mtrr_gentry -{ +struct mtrr_gentry { unsigned int regnum; /* Register number */ unsigned long base; /* Base address */ unsigned int size; /* Size of region */ @@ -51,8 +49,7 @@ struct mtrr_gentry #else /* __i386__ */ -struct mtrr_gentry -{ +struct mtrr_gentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int regnum; /* Register number */ @@ -89,12 +86,12 @@ struct mtrr_gentry extern u8 mtrr_type_lookup(u64 addr, u64 end); extern void mtrr_save_fixed_ranges(void *); extern void mtrr_save_state(void); -extern int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, bool increment); -extern int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, bool increment); -extern int mtrr_del (int reg, unsigned long base, unsigned long size); -extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); +extern int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, bool increment); +extern int mtrr_add_page(unsigned long base, unsigned long size, + unsigned int type, bool increment); +extern int mtrr_del(int reg, unsigned long base, unsigned long size); +extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); @@ -110,23 +107,21 @@ static inline u8 mtrr_type_lookup(u64 addr, u64 end) } #define mtrr_save_fixed_ranges(arg) do {} while (0) #define mtrr_save_state() do {} while (0) -static __inline__ int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, bool increment) +static inline int mtrr_add(unsigned long base, unsigned long size, + unsigned int type, bool increment) { return -ENODEV; } -static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, +static inline int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, bool increment) { return -ENODEV; } -static __inline__ int mtrr_del (int reg, unsigned long base, - unsigned long size) +static inline int mtrr_del(int reg, unsigned long base, unsigned long size) { return -ENODEV; } -static __inline__ int mtrr_del_page (int reg, unsigned long base, - unsigned long size) +static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size) { return -ENODEV; } @@ -134,7 +129,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) { return 0; } -static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} +static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) +{ +} #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) @@ -143,15 +140,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} #ifdef CONFIG_COMPAT #include -struct mtrr_sentry32 -{ +struct mtrr_sentry32 { compat_ulong_t base; /* Base address */ compat_uint_t size; /* Size of region */ compat_uint_t type; /* Type of region */ }; -struct mtrr_gentry32 -{ +struct mtrr_gentry32 { compat_ulong_t regnum; /* Register number */ compat_uint_t base; /* Base address */ compat_uint_t size; /* Size of region */ @@ -160,16 +155,17 @@ struct mtrr_gentry32 #define MTRR_IOCTL_BASE 'M' -#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) -#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) -#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) -#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, 
struct mtrr_gentry32) -#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) -#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) -#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) -#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) -#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) -#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) +#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) +#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) +#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) +#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) +#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) +#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) +#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) +#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) +#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) +#define MTRRIOC32_KILL_PAGE_ENTRY \ + _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) #endif /* CONFIG_COMPAT */ #endif /* __KERNEL__ */ -- cgit v1.2.1 From b2347fad517f61553e03135db60def2392d9c2bc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:53 -0700 Subject: include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_32.h | 64 ++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index 9a6b3da25914..73e928ef5f03 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h @@ -21,22 +21,20 @@ * wasn't 1 originally. This function MUST leave the value lower than 1 * even when the "1" assertion wasn't true. */ -#define __mutex_fastpath_lock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ +#define __mutex_fastpath_lock(count, fail_fn) \ +do { \ + unsigned int dummy; \ + \ + typecheck(atomic_t *, count); \ typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " decl (%%eax) \n" \ - " jns 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ + \ + asm volatile(LOCK_PREFIX " decl (%%eax)\n" \ + " jns 1f \n" \ + " call " #fail_fn "\n" \ + "1:\n" \ + : "=a" (dummy) \ + : "a" (count) \ + : "memory", "ecx", "edx"); \ } while (0) @@ -50,8 +48,8 @@ do { \ * wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_lock_retval(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); @@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs * to return 0 otherwise. 
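 *
 * (Rough C equivalent of the unlock fastpath below, for illustration
 * only -- the real code must bump the counter with a single locked
 * instruction:
 *
 *	if (atomic_inc_return(count) <= 0)
 *		fail_fn(count);
 * )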
*/ -#define __mutex_fastpath_unlock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ +#define __mutex_fastpath_unlock(count, fail_fn) \ +do { \ + unsigned int dummy; \ + \ + typecheck(atomic_t *, count); \ typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " incl (%%eax) \n" \ - " jg 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ + \ + asm volatile(LOCK_PREFIX " incl (%%eax)\n" \ + " jg 1f\n" \ + " call " #fail_fn "\n" \ + "1:\n" \ + : "=a" (dummy) \ + : "a" (count) \ + : "memory", "ecx", "edx"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 @@ -104,8 +100,8 @@ do { \ * Additionally, if the value was < 0 originally, this function must not leave * it to 0 on failure. */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_trylock(atomic_t *count, + int (*fail_fn)(atomic_t *)) { /* * We have two variants here. The cmpxchg based one is the best one -- cgit v1.2.1 From 2c4e8830414de84cc969a1f9685f4c48b91ca6e7 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:54 -0700 Subject: include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/mutex_64.h | 73 +++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 39 deletions(-) diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h index 6c2949a3c677..f3fae9becb38 100644 --- a/include/asm-x86/mutex_64.h +++ b/include/asm-x86/mutex_64.h @@ -16,23 +16,21 @@ * * Atomically decrements @v and calls if the result is negative. */ -#define __mutex_fastpath_lock(v, fail_fn) \ -do { \ - unsigned long dummy; \ - \ - typecheck(atomic_t *, v); \ - typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " decl (%%rdi) \n" \ - " jns 1f \n" \ - " call "#fail_fn" \n" \ - "1:" \ - \ - :"=D" (dummy) \ - : "D" (v) \ - : "rax", "rsi", "rdx", "rcx", \ - "r8", "r9", "r10", "r11", "memory"); \ +#define __mutex_fastpath_lock(v, fail_fn) \ +do { \ + unsigned long dummy; \ + \ + typecheck(atomic_t *, v); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ + \ + asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \ + " jns 1f \n" \ + " call " #fail_fn "\n" \ + "1:" \ + : "=D" (dummy) \ + : "D" (v) \ + : "rax", "rsi", "rdx", "rcx", \ + "r8", "r9", "r10", "r11", "memory"); \ } while (0) /** @@ -45,9 +43,8 @@ do { \ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, - int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_lock_retval(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); @@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count, * * Atomically increments @v and calls if the result is nonpositive. 
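 *
 * (Same fastpath idea as the 32-bit variant above; the clobber list
 * below is wider because the slowpath call obeys the x86-64 calling
 * convention, in which rax, rsi, rdx, rcx and r8-r11 are
 * caller-saved.)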
*/ -#define __mutex_fastpath_unlock(v, fail_fn) \ -do { \ - unsigned long dummy; \ - \ - typecheck(atomic_t *, v); \ - typecheck_fn(void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " incl (%%rdi) \n" \ - " jg 1f \n" \ - " call "#fail_fn" \n" \ - "1: " \ - \ - :"=D" (dummy) \ - : "D" (v) \ - : "rax", "rsi", "rdx", "rcx", \ - "r8", "r9", "r10", "r11", "memory"); \ +#define __mutex_fastpath_unlock(v, fail_fn) \ +do { \ + unsigned long dummy; \ + \ + typecheck(atomic_t *, v); \ + typecheck_fn(void (*)(atomic_t *), fail_fn); \ + \ + asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \ + " jg 1f\n" \ + " call " #fail_fn "\n" \ + "1:" \ + : "=D" (dummy) \ + : "D" (v) \ + : "rax", "rsi", "rdx", "rcx", \ + "r8", "r9", "r10", "r11", "memory"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 @@ -93,8 +88,8 @@ do { \ * if it wasn't 1 originally. [the fallback function is never used on * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +static inline int __mutex_fastpath_trylock(atomic_t *count, + int (*fail_fn)(atomic_t *)) { if (likely(atomic_cmpxchg(count, 1, 0) == 1)) return 1; -- cgit v1.2.1 From cb046eed76b7f74e619479f1aba17e74ce6f5159 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:55 -0700 Subject: include/asm-x86/numa_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/numa_64.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 15fe07cde586..32c22ae0709f 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h @@ -1,11 +1,12 @@ -#ifndef _ASM_X8664_NUMA_H +#ifndef _ASM_X8664_NUMA_H #define _ASM_X8664_NUMA_H 1 #include #include struct bootnode { - u64 start,end; + u64 start; + u64 end; }; extern int compute_hash_shift(struct bootnode *nodes, int numnodes); -- cgit v1.2.1 From 5f4e4b7209deb3cd7cf16ebb7bf84917e4b6682a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:56 -0700 Subject: include/asm-x86/numaq.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/numaq.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h index 38f710dc37f2..94b86c31239a 100644 --- a/include/asm-x86/numaq.h +++ b/include/asm-x86/numaq.h @@ -3,7 +3,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void); /* * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the */ -#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ +#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private + quad space */ /* * Communication area for each processor on lynxer-processor tests. @@ -139,7 +140,7 @@ struct sys_cfg_data { unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ /* may not be totally populated */ - unsigned int split_mem_enbl; /* 0 for no low shared memory */ + unsigned int split_mem_enbl; /* 0 for no low shared memory */ unsigned int mmio_sz; /* Size of total system memory mapped I/O */ /* (in MB). 
*/ unsigned int quad_spin_lock; /* Spare location used for quad */ @@ -152,7 +153,7 @@ struct sys_cfg_data { /* * memory configuration area for each quad */ - struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ + struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ }; static inline unsigned long *get_zholes_size(int nid) -- cgit v1.2.1 From 095d1c4e61d77de4b33ea3b202aa6342f69d9891 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:57 -0700 Subject: include/asm-x86/page_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/page_32.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 5f7257fd589b..424e82f8ae27 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h @@ -47,7 +47,10 @@ typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; typedef unsigned long phys_addr_t; -typedef union { pteval_t pte, pte_low; } pte_t; +typedef union { + pteval_t pte; + pteval_t pte_low; +} pte_t; #endif /* __ASSEMBLY__ */ #endif /* CONFIG_X86_PAE */ @@ -61,7 +64,7 @@ typedef struct page *pgtable_t; #endif #ifndef __ASSEMBLY__ -#define __phys_addr(x) ((x)-PAGE_OFFSET) +#define __phys_addr(x) ((x) - PAGE_OFFSET) #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) #ifdef CONFIG_FLATMEM @@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) -#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) +#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE) #ifdef CONFIG_X86_USE_3DNOW #include -- cgit v1.2.1 From b20a4615944a0c106fce2aecb7ea1dbc8eefc71b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:58 -0700 Subject: include/asm-x86/page_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/page_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index aee05c616e05..f156778f707c 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -5,7 +5,7 @@ #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) -#define CURRENT_MASK (~(THREAD_SIZE-1)) +#define CURRENT_MASK (~(THREAD_SIZE - 1)) #define EXCEPTION_STACK_ORDER 0 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) @@ -51,7 +51,7 @@ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in * arch/x86/kernel/head_64.S), and it is mapped here: */ -#define KERNEL_IMAGE_SIZE (512*1024*1024) +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) #ifndef __ASSEMBLY__ -- cgit v1.2.1 From fad599854e3997a3e93559e19759a26b18c906c6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:02:59 -0700 Subject: include/asm-x86/param.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/param.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h index c996ec4da0c8..6f0d0422f4ca 100644 --- a/include/asm-x86/param.h +++ b/include/asm-x86/param.h @@ -3,8 +3,8 @@ #ifdef __KERNEL__ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. 
some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ +# define USER_HZ 100 /* some user interfaces are */ +# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ #endif #ifndef HZ -- cgit v1.2.1 From 49cd740bb0b5796f34699a0f945b977f6ff34c64 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:00 -0700 Subject: include/asm-x86/paravirt.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/paravirt.h | 47 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index d6236eb46466..0c23f7940bc4 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -231,7 +231,8 @@ struct pv_mmu_ops { void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (*pte_update)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -246,7 +247,8 @@ struct pv_mmu_ops { void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); + void (*pte_clear)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); void (*pmd_clear)(pmd_t *pmdp); #endif /* CONFIG_X86_PAE */ @@ -274,8 +276,7 @@ struct pv_mmu_ops { /* This contains all the paravirt structures: we get a convenient * number for each function using the offset which we use to indicate * what to patch. */ -struct paravirt_patch_template -{ +struct paravirt_patch_template { struct pv_init_ops pv_init_ops; struct pv_time_ops pv_time_ops; struct pv_cpu_ops pv_cpu_ops; @@ -660,32 +661,37 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) } /* These should all do BUG_ON(_err), but our headers are too tangled. 
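 *
 * (Illustration of what that means in practice: with the macros
 * below,
 *
 *	unsigned lo, hi;
 *	rdmsr(MSR_EFER, lo, hi);
 *
 * expands to a paravirt_read_msr() call whose error return lands in
 * a local _err that is then silently discarded.)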
*/ -#define rdmsr(msr,val1,val2) do { \ +#define rdmsr(msr, val1, val2) \ +do { \ int _err; \ u64 _l = paravirt_read_msr(msr, &_err); \ val1 = (u32)_l; \ val2 = _l >> 32; \ -} while(0) +} while (0) -#define wrmsr(msr,val1,val2) do { \ +#define wrmsr(msr, val1, val2) \ +do { \ paravirt_write_msr(msr, val1, val2); \ -} while(0) +} while (0) -#define rdmsrl(msr,val) do { \ +#define rdmsrl(msr, val) \ +do { \ int _err; \ val = paravirt_read_msr(msr, &_err); \ -} while(0) +} while (0) -#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) -#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b) +#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) +#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) /* rdmsr with exception handling */ -#define rdmsr_safe(msr,a,b) ({ \ +#define rdmsr_safe(msr, a, b) \ +({ \ int _err; \ u64 _l = paravirt_read_msr(msr, &_err); \ (*a) = (u32)_l; \ (*b) = _l >> 32; \ - _err; }) + _err; \ +}) static inline u64 paravirt_read_tsc(void) @@ -693,10 +699,11 @@ static inline u64 paravirt_read_tsc(void) return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); } -#define rdtscl(low) do { \ +#define rdtscl(low) \ +do { \ u64 _l = paravirt_read_tsc(); \ low = (int)_l; \ -} while(0) +} while (0) #define rdtscll(val) (val = paravirt_read_tsc()) @@ -711,11 +718,12 @@ static inline unsigned long long paravirt_read_pmc(int counter) return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); } -#define rdpmc(counter,low,high) do { \ +#define rdpmc(counter, low, high) \ +do { \ u64 _l = paravirt_read_pmc(counter); \ low = (u32)_l; \ high = _l >> 32; \ -} while(0) +} while (0) static inline unsigned long long paravirt_rdtscp(unsigned int *aux) { @@ -794,7 +802,8 @@ static inline void set_iopl_mask(unsigned mask) } /* The paravirtualized I/O functions */ -static inline void slow_down_io(void) { +static inline void slow_down_io(void) +{ pv_cpu_ops.io_delay(); #ifdef REALLY_SLOW_IO pv_cpu_ops.io_delay(); -- cgit v1.2.1 From 5269354231c6e960941f6e1fd0737acc003d2353 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:01 -0700 Subject: include/asm-x86/parport.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/parport.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h index 019cbca24a38..3c4ffeb467e9 100644 --- a/include/asm-x86/parport.h +++ b/include/asm-x86/parport.h @@ -1,10 +1,10 @@ #ifndef _ASM_X86_PARPORT_H #define _ASM_X86_PARPORT_H -static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) +static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); +static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) { - return parport_pc_find_isa_ports (autoirq, autodma); + return parport_pc_find_isa_ports(autoirq, autodma); } #endif /* _ASM_X86_PARPORT_H */ -- cgit v1.2.1 From 3cb47d79e99ad7b3a02badd7f64fbb57a1b125b3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:02 -0700 Subject: include/asm-x86/pci_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pci_64.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index 374690314539..df867e5d80b1 100644 --- a/include/asm-x86/pci_64.h +++ 
b/include/asm-x86/pci_64.h @@ -1,12 +1,10 @@ #ifndef __x8664_PCI_H #define __x8664_PCI_H - #ifdef __KERNEL__ - #ifdef CONFIG_CALGARY_IOMMU -static inline void* pci_iommu(struct pci_bus *bus) +static inline void *pci_iommu(struct pci_bus *bus) { struct pci_sysdata *sd = bus->sysdata; return sd->iommu; @@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val) } #endif /* CONFIG_CALGARY_IOMMU */ - -extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); -extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); - - +extern int (*pci_config_read)(int seg, int bus, int dev, int fn, + int reg, int len, u32 *value); +extern int (*pci_config_write)(int seg, int bus, int dev, int fn, + int reg, int len, u32 value); extern void pci_iommu_alloc(void); @@ -65,5 +62,4 @@ extern void pci_iommu_alloc(void); #endif /* __KERNEL__ */ - #endif /* __x8664_PCI_H */ -- cgit v1.2.1 From 565c6402db09b1d1b7623ebd629f3adf6b86feaa Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:03 -0700 Subject: include/asm-x86/pci-direct.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pci-direct.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h index 6823fa4f1afa..5b21485be573 100644 --- a/include/asm-x86/pci-direct.h +++ b/include/asm-x86/pci-direct.h @@ -4,7 +4,7 @@ #include /* Direct PCI access. This is used for PCI accesses in early boot before - the PCI subsystem works. */ + the PCI subsystem works. */ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); -- cgit v1.2.1 From 69bdb7bcc6b9494b1738c6f6dd3fe553c9c6978e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:04 -0700 Subject: include/asm-x86/pci.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pci.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index c61190cb9e12..ddd8e248fc0a 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h @@ -8,14 +8,13 @@ #include #include - #ifdef __KERNEL__ struct pci_sysdata { int domain; /* PCI domain */ int node; /* NUMA node */ #ifdef CONFIG_X86_64 - void* iommu; /* IOMMU private data */ + void *iommu; /* IOMMU private data */ #endif }; @@ -52,7 +51,7 @@ extern unsigned long pci_mem_start; #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); -struct pci_bus * pcibios_scan_root(int bus); +struct pci_bus *pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); @@ -62,7 +61,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); + enum pci_mmap_state mmap_state, + int write_combine); #ifdef CONFIG_PCI -- cgit v1.2.1 From 46e1abc63e736644265e7ec2897f963a4ace5993 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:05 -0700 Subject: include/asm-x86/pda.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pda.h | 80 +++++++++++++++++++++++++++------------------------ 1 file changed, 42 
insertions(+), 38 deletions(-) diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index d9dc209c24ad..101fb9e11954 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h @@ -57,34 +57,36 @@ extern struct x8664_pda _proxy_pda; #define pda_offset(field) offsetof(struct x8664_pda, field) -#define pda_to_op(op, field, val) do { \ - typedef typeof(_proxy_pda.field) T__; \ - if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ - switch (sizeof(_proxy_pda.field)) { \ - case 2: \ - asm(op "w %1,%%gs:%c2" : \ - "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ - "i"(pda_offset(field))); \ - break; \ - case 4: \ - asm(op "l %1,%%gs:%c2" : \ - "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ - "i" (pda_offset(field))); \ - break; \ - case 8: \ - asm(op "q %1,%%gs:%c2": \ - "+m" (_proxy_pda.field) : \ - "ri" ((T__)val), \ - "i"(pda_offset(field))); \ - break; \ - default: \ - __bad_pda_field(); \ - } \ - } while (0) +#define pda_to_op(op, field, val) \ +do { \ + typedef typeof(_proxy_pda.field) T__; \ + if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ + switch (sizeof(_proxy_pda.field)) { \ + case 2: \ + asm(op "w %1,%%gs:%c2" : \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i"(pda_offset(field))); \ + break; \ + case 4: \ + asm(op "l %1,%%gs:%c2" : \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i" (pda_offset(field))); \ + break; \ + case 8: \ + asm(op "q %1,%%gs:%c2": \ + "+m" (_proxy_pda.field) : \ + "ri" ((T__)val), \ + "i"(pda_offset(field))); \ + break; \ + default: \ + __bad_pda_field(); \ + } \ +} while (0) -#define pda_from_op(op,field) ({ \ +#define pda_from_op(op, field) \ +({ \ typeof(_proxy_pda.field) ret__; \ switch (sizeof(_proxy_pda.field)) { \ case 2: \ @@ -92,23 +94,24 @@ extern struct x8664_pda _proxy_pda; "=r" (ret__) : \ "i" (pda_offset(field)), \ "m" (_proxy_pda.field)); \ - break; \ + break; \ case 4: \ asm(op "l %%gs:%c1,%0": \ "=r" (ret__): \ "i" (pda_offset(field)), \ "m" (_proxy_pda.field)); \ - break; \ + break; \ case 8: \ asm(op "q %%gs:%c1,%0": \ "=r" (ret__) : \ "i" (pda_offset(field)), \ "m" (_proxy_pda.field)); \ - break; \ + break; \ default: \ __bad_pda_field(); \ - } \ - ret__; }) + } \ + ret__; \ +}) #define read_pda(field) pda_from_op("mov", field) #define write_pda(field, val) pda_to_op("mov", field, val) @@ -117,12 +120,13 @@ extern struct x8664_pda _proxy_pda; #define or_pda(field, val) pda_to_op("or", field, val) /* This is not atomic against other CPUs -- CPU preemption needs to be off */ -#define test_and_clear_bit_pda(bit, field) ({ \ - int old__; \ - asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ - : "=r" (old__), "+m" (_proxy_pda.field) \ - : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ - old__; \ +#define test_and_clear_bit_pda(bit, field) \ +({ \ + int old__; \ + asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ + : "=r" (old__), "+m" (_proxy_pda.field) \ + : "dIr" (bit), "i" (pda_offset(field)) : "memory");\ + old__; \ }) #endif -- cgit v1.2.1 From bc9e3be20bab447519e864c4f9e09aae82ed0376 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:06 -0700 Subject: include/asm-x86/percpu.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/percpu.h | 104 ++++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index 0dec00f27eb4..736fc3bb8e1e 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h @@ -85,58 
+85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off); * don't give an lvalue though). */ extern void __bad_percpu_size(void); -#define percpu_to_op(op,var,val) \ - do { \ - typedef typeof(var) T__; \ - if (0) { T__ tmp__; tmp__ = (val); } \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 2: \ - asm(op "w %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 4: \ - asm(op "l %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - } while (0) - -#define percpu_from_op(op,var) \ - ({ \ - typeof(var) ret__; \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 2: \ - asm(op "w "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 4: \ - asm(op "l "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - ret__; }) +#define percpu_to_op(op, var, val) \ +do { \ + typedef typeof(var) T__; \ + if (0) { \ + T__ tmp__; \ + tmp__ = (val); \ + } \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b %1,"__percpu_seg"%0" \ + : "+m" (var) \ + : "ri" ((T__)val)); \ + break; \ + case 2: \ + asm(op "w %1,"__percpu_seg"%0" \ + : "+m" (var) \ + : "ri" ((T__)val)); \ + break; \ + case 4: \ + asm(op "l %1,"__percpu_seg"%0" \ + : "+m" (var) \ + : "ri" ((T__)val)); \ + break; \ + default: __bad_percpu_size(); \ + } \ +} while (0) + +#define percpu_from_op(op, var) \ +({ \ + typeof(var) ret__; \ + switch (sizeof(var)) { \ + case 1: \ + asm(op "b "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + case 2: \ + asm(op "w "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + case 4: \ + asm(op "l "__percpu_seg"%1,%0" \ + : "=r" (ret__) \ + : "m" (var)); \ + break; \ + default: __bad_percpu_size(); \ + } \ + ret__; \ +}) #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) -#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) -#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) -#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) -#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) +#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) +#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) +#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) +#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) #endif /* !__ASSEMBLY__ */ #endif /* !CONFIG_X86_64 */ #endif /* _ASM_X86_PERCPU_H_ */ -- cgit v1.2.1 From 65e05d15edfdd6ecb4426894cf6e6b5ae97602e4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:08 -0700 Subject: include/asm-x86/pgtable-2level.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable-2level.h | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 701404fab308..46bc52c0eae1 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h @@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) native_set_pte(ptep, pte); } -static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, +static inline void 
native_set_pte_present(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) { native_set_pte(ptep, pte); @@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp) native_set_pmd(pmdp, __pmd(0)); } -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) +static inline void native_pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *xp) { *xp = native_make_pte(0); } @@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) */ #define PTE_FILE_MAX_BITS 29 -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) +#define pte_to_pgoff(pte) \ + ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5)) -#define pgoff_to_pte(off) \ - ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) +#define pgoff_to_pte(off) \ + ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \ + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) -#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) -- cgit v1.2.1 From cf840147d48626d5d86d617cbc5b7cddc1bcae14 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:09 -0700 Subject: include/asm-x86/pgtable_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable_32.h | 102 ++++++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 1e2c0d839528..c4a643674458 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -40,13 +40,13 @@ void paging_init(void); #ifdef CONFIG_X86_PAE # include # define PMD_SIZE (1UL << PMD_SHIFT) -# define PMD_MASK (~(PMD_SIZE-1)) +# define PMD_MASK (~(PMD_SIZE - 1)) #else # include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) @@ -58,21 +58,22 @@ void paging_init(void); * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. 
;) */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long) high_memory + \ - 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) +#define VMALLOC_OFFSET (8 * 1024 * 1024) +#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \ + & ~(VMALLOC_OFFSET - 1)) #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) +#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ + & PMD_MASK) #ifdef CONFIG_HIGHMEM -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) +# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) #else -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) #endif /* @@ -88,16 +89,16 @@ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ -#define pmd_none(x) (!(unsigned long)pmd_val(x)) -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) +#define pmd_none(x) (!(unsigned long)pmd_val((x))) +#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) extern int pmd_bad(pmd_t pmd); -#define pmd_bad_v1(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) -#define pmd_bad_v2(x) ((pmd_val(x) \ - & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ - != _KERNPG_TABLE) - +#define pmd_bad_v1(x) \ + (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER))) +#define pmd_bad_v2(x) \ + (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \ + _PAGE_PSE | _PAGE_NX))) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) @@ -123,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) } /* - * Macro to mark a page protection value as "uncacheable". On processors which do not support - * it, this is a no-op. + * Macro to mark a page protection value as "uncacheable". + * On processors which do not support it, this is a no-op. */ -#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ - ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) +#define pgprot_noncached(prot) \ + ((boot_cpu_data.x86 > 3) \ + ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \ + : (prot)) /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
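 *
 * (Sketch, assuming the usual pfn_pte() helper: mk_pte(page, prot)
 * below reduces to pfn_pte(page_to_pfn(page), prot), i.e. the page
 * frame number shifted into place with the protection bits ORed in.)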
*/ - #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) /* @@ -142,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) * this macro returns the index of the entry in the pgd page which would * control the given virtual address */ -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_index_k(addr) pgd_index(addr) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#define pgd_index_k(addr) pgd_index((addr)) /* * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) static inline int pud_large(pud_t pud) { return 0; } @@ -165,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; } * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ -#define pmd_index(address) \ - (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) +#define pmd_index(address) \ + (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] @@ -174,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; } * this macro returns the index of the entry in the pte page which would * control the given virtual address */ -#define pte_index(address) \ - (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) +#define pte_index(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) -#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) +#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) -#define pmd_page_vaddr(pmd) \ - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) +#define pmd_page_vaddr(pmd) \ + ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK)) #if defined(CONFIG_HIGHPTE) -#define pte_offset_map(dir, address) \ - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) -#define pte_offset_map_nested(dir, address) \ - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) -#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) -#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) +#define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ + pte_index((address))) +#define pte_offset_map_nested(dir, address) \ + ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ + pte_index((address))) +#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0) +#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) #else -#define pte_offset_map(dir, address) \ - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) -#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) +#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address)) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #endif /* Clear a kernel PTE and flush it from the TLB 
*/ -#define kpte_clear_flush(ptep, vaddr) \ -do { \ - pte_clear(&init_mm, vaddr, ptep); \ - __flush_tlb_one(vaddr); \ +#define kpte_clear_flush(ptep, vaddr) \ +do { \ + pte_clear(&init_mm, (vaddr), (ptep)); \ + __flush_tlb_one((vaddr)); \ } while (0) /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. */ -#define update_mmu_cache(vma,address,pte) do { } while (0) +#define update_mmu_cache(vma, address, pte) do { } while (0) void native_pagetable_setup_start(pgd_t *base); void native_pagetable_setup_done(pgd_t *base); @@ -239,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base) #define kern_addr_valid(kaddr) (0) #endif -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) #endif /* _I386_PGTABLE_H */ -- cgit v1.2.1 From 4b01fef89a10cedbae9857e76283616af3f177cd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:10 -0700 Subject: include/asm-x86/pgtable-3level.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable-3level.h | 48 +++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 1d763eec740f..8b4a9d44b7f4 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h @@ -8,22 +8,26 @@ * Copyright (C) 1999 Ingo Molnar */ -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) - +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ + __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %p(%016Lx).\n", \ + __FILE__, __LINE__, &(e), pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%016Lx).\n", \ + __FILE__, __LINE__, &(e), pgd_val(e)) static inline int pud_none(pud_t pud) { return pud_val(pud) == 0; } + static inline int pud_bad(pud_t pud) { return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; } + static inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_PRESENT; @@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) * we are justified in merely clearing the PTE present bit, followed * by a set. The ordering here is important. 
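 *
 * (Concretely: a 64-bit PAE pte is written as two 32-bit halves, so
 * the helper below stores pte_low = 0 first to drop the present bit,
 * issues a write barrier, fills in pte_high, issues another barrier,
 * and only then writes the new pte_low -- a concurrent walker never
 * sees a half-written entry that is marked present.)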
*/ -static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, +static inline void native_set_pte_present(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) { ptep->pte_low = 0; @@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long ad static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { - set_64bit((unsigned long long *)(ptep),native_pte_val(pte)); + set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); } + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { - set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); } + static inline void native_set_pud(pud_t *pudp, pud_t pud) { - set_64bit((unsigned long long *)(pudp),native_pud_val(pud)); + set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); } /* @@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud) * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. */ -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { ptep->pte_low = 0; smp_wmb(); @@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp) * current pgd to avoid unnecessary TLB flushes. */ pgd = read_cr3(); - if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) + if (__pa(pudp) >= pgd && __pa(pudp) < + (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) write_cr3(pgd); } -#define pud_page(pud) \ -((struct page *) __va(pud_val(pud) & PAGE_MASK)) +#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK)) -#define pud_page_vaddr(pud) \ -((unsigned long) __va(pud_val(pud) & PAGE_MASK)) +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) /* Find an entry in the second-level page table.. */ -#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ - pmd_index(address)) +#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \ + pmd_index(address)) #ifdef CONFIG_SMP static inline pte_t native_ptep_get_and_clear(pte_t *ptep) @@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte) * put the 32 bits of offset into the high part. 
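 *
 * (So a nonlinear file pte keeps the full 32-bit file offset in
 * pte_high while pte_low carries only _PAGE_FILE -- pgoff_to_pte()
 * just below constructs exactly that pair.)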
*/ #define pte_to_pgoff(pte) ((pte).pte_high) -#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) +#define pgoff_to_pte(off) \ + ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ -- cgit v1.2.1 From 7f94401e439dc1137319c48dfec0285f681eb3ad Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:11 -0700 Subject: include/asm-x86/pgtable_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable_64.h | 141 +++++++++++++++++++++++-------------------- 1 file changed, 76 insertions(+), 65 deletions(-) diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 0a5081c98ae1..9fd87d0b6477 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -52,14 +52,18 @@ extern void paging_init(void); #ifndef __ASSEMBLY__ -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) -#define pud_ERROR(e) \ - printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%016lx).\n", \ + __FILE__, __LINE__, &(e), pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %p(%016lx).\n", \ + __FILE__, __LINE__, &(e), pmd_val(e)) +#define pud_ERROR(e) \ + printk("%s:%d: bad pud %p(%016lx).\n", \ + __FILE__, __LINE__, &(e), pud_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%016lx).\n", \ + __FILE__, __LINE__, &(e), pgd_val(e)) #define pgd_none(x) (!pgd_val(x)) #define pud_none(x) (!pud_val(x)) @@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #ifdef CONFIG_SMP return native_make_pte(xchg(&xp->pte, 0)); #else - /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */ + /* native_local_ptep_get_and_clear, + but duplicated because of cyclic dependency */ pte_t ret = *xp; native_pte_clear(NULL, 0, xp); return ret; @@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) *pgdp = pgd; } -static inline void native_pgd_clear(pgd_t * pgd) +static inline void native_pgd_clear(pgd_t *pgd) { native_set_pgd(pgd, native_make_pgd(0)); } @@ -128,15 +133,15 @@ static inline void native_pgd_clear(pgd_t * pgd) #endif /* !__ASSEMBLY__ */ -#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) -#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) -#define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) +#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE - 1)) +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) -#define MAXMEM _AC(0x3fffffffffff, UL) +#define MAXMEM _AC(0x00003fffffffffff, UL) #define VMALLOC_START _AC(0xffffc20000000000, UL) #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) @@ -163,18 +168,18 @@ static inline unsigned long pmd_bad(pmd_t pmd) ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); } -#define pte_none(x) (!pte_val(x)) -#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) +#define pte_none(x) 
(!pte_val((x))) +#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) -#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ -#define pte_page(x) pfn_to_page(pte_pfn(x)) -#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) +#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ +#define pte_page(x) pfn_to_page(pte_pfn((x))) +#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) /* * Macro to mark a page protection value as "uncacheable". */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) - +#define pgprot_noncached(prot) \ + (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT)) /* * Conversion functions: convert a page and protection to a page entry, @@ -184,77 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd) /* * Level 4 access. */ -#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK)) -#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)) -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) -#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) +#define pgd_page_vaddr(pgd) \ + ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) +#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address))) #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) static inline int pgd_large(pgd_t pgd) { return 0; } #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) /* PUD - Level3 access */ /* to find an entry in a page-table-directory. 
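
A note on the _SIZE/_MASK pattern being reflowed above: because each
size is a power of two, ~(SIZE - 1) is a contiguous alignment mask, and
ANDing an address with it rounds the address down to the start of the
region that one table entry covers. A small user-space illustration
with the x86-64 shifts (a sketch, not the kernel headers):

  #include <stdio.h>

  #define DEMO_PMD_SHIFT   21
  #define DEMO_PUD_SHIFT   30
  #define DEMO_PGDIR_SHIFT 39

  #define DEMO_PMD_SIZE    (1UL << DEMO_PMD_SHIFT)     /* 2 MiB   */
  #define DEMO_PMD_MASK    (~(DEMO_PMD_SIZE - 1))
  #define DEMO_PUD_SIZE    (1UL << DEMO_PUD_SHIFT)     /* 1 GiB   */
  #define DEMO_PUD_MASK    (~(DEMO_PUD_SIZE - 1))
  #define DEMO_PGDIR_SIZE  (1UL << DEMO_PGDIR_SHIFT)   /* 512 GiB */
  #define DEMO_PGDIR_MASK  (~(DEMO_PGDIR_SIZE - 1))

  int main(void)
  {
          unsigned long addr = 0xffff880012345678UL;

          /* Each mask drops the offset within one entry's reach. */
          printf("pmd slot base   %#lx\n", addr & DEMO_PMD_MASK);
          printf("pud slot base   %#lx\n", addr & DEMO_PUD_MASK);
          printf("pgdir slot base %#lx\n", addr & DEMO_PGDIR_MASK);
          return 0;
  }
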
*/ -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) -#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT)) -#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) -#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) -#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) +#define pud_page_vaddr(pud) \ + ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK)) +#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT)) +#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) +#define pud_offset(pgd, address) \ + ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) +#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT) static inline int pud_large(pud_t pte) { - return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == - (_PAGE_PSE|_PAGE_PRESENT); + return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == + (_PAGE_PSE | _PAGE_PRESENT); } /* PMD - Level 2 access */ -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) -#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) - -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \ - pmd_index(address)) -#define pmd_none(x) (!pmd_val(x)) -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) -#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) - -#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) -#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE }) +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) +#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) + +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) +#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ + pmd_index(address)) +#define pmd_none(x) (!pmd_val((x))) +#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) +#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) +#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) + +#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) +#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ + _PAGE_FILE }) #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT /* PTE - Level 1 access. */ /* page, protection -> pte */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - -#define pte_index(address) \ - (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot)) + +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ - pte_index(address)) + pte_index((address))) /* x86-64 always has all page tables mapped. 
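
Taken together, the pgd/pud/pmd/pte index macros split a virtual
address into four 9-bit table indices plus a 12-bit page offset, so a
table walk is pure shift-and-mask arithmetic. A user-space sketch of
just the index math (512 entries per level, as on x86-64; no page
tables are touched):

  #include <stdio.h>

  #define DEMO_PAGE_SHIFT  12
  #define DEMO_PMD_SHIFT   21
  #define DEMO_PUD_SHIFT   30
  #define DEMO_PGDIR_SHIFT 39
  #define DEMO_PTRS        512    /* 9 address bits per level */

  int main(void)
  {
          unsigned long address = 0xffffc20000123456UL; /* vmalloc-area style */

          printf("pgd %lu pud %lu pmd %lu pte %lu offset %#lx\n",
                 (address >> DEMO_PGDIR_SHIFT) & (DEMO_PTRS - 1),
                 (address >> DEMO_PUD_SHIFT)   & (DEMO_PTRS - 1),
                 (address >> DEMO_PMD_SHIFT)   & (DEMO_PTRS - 1),
                 (address >> DEMO_PAGE_SHIFT)  & (DEMO_PTRS - 1),
                 address & ((1UL << DEMO_PAGE_SHIFT) - 1));
          return 0;
  }
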
*/ -#define pte_offset_map(dir,address) pte_offset_kernel(dir,address) -#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address) +#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) +#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) #define pte_unmap(pte) /* NOP */ -#define pte_unmap_nested(pte) /* NOP */ +#define pte_unmap_nested(pte) /* NOP */ -#define update_mmu_cache(vma,address,pte) do { } while (0) +#define update_mmu_cache(vma, address, pte) do { } while (0) extern int direct_gbpages; /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) -#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \ + ((offset) << 8) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) -extern int kern_addr_valid(unsigned long addr); +extern int kern_addr_valid(unsigned long addr); extern void cleanup_highmap(void); -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN @@ -267,8 +276,10 @@ extern void cleanup_highmap(void); /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) -#define kc_offset_to_vaddr(o) \ - (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) +#define kc_offset_to_vaddr(o) \ + (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \ + ? 
((o) | ~__VIRTUAL_MASK) \ + : (o)) #define __HAVE_ARCH_PTE_SAME #endif /* !__ASSEMBLY__ */ -- cgit v1.2.1 From 3cbaeafeb10e38bce6c8d4764a254260d5a564bd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:12 -0700 Subject: include/asm-x86/pgtable.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/pgtable.h | 156 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 120 insertions(+), 36 deletions(-) diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index e814cfe96af2..2ce765070464 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -48,12 +48,15 @@ #endif /* If _PAGE_PRESENT is clear, we use these: */ -#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */ +#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, + * saved PTE; unset:swap */ #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; pte_present gives true */ -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ + _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) @@ -64,14 +67,20 @@ #define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) - -#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) + +#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ + _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) #define PAGE_COPY PAGE_COPY_NOEXEC -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_NX) +#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) #ifdef CONFIG_X86_32 #define _PAGE_KERNEL_EXEC \ @@ -142,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ -extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern spinlock_t pgd_lock; @@ -152,30 +161,101 @@ extern struct list_head pgd_list; * The following only work if pte_present() is true. * Undefined behaviour if not.. 
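
The protection macros being reflowed above are plain OR-compositions of
single-bit flags, and the pte_*() predicates that follow are single AND
tests against the same bits. A sketch using the conventional x86 bit
positions (illustrative values only, not a substitute for the header):

  #include <assert.h>
  #include <stdint.h>

  #define DEMO_PRESENT  (1ULL << 0)
  #define DEMO_RW       (1ULL << 1)
  #define DEMO_USER     (1ULL << 2)
  #define DEMO_ACCESSED (1ULL << 5)
  #define DEMO_DIRTY    (1ULL << 6)
  #define DEMO_NX       (1ULL << 63)

  /* PAGE_SHARED, composed as above: present, writable, user, no-exec. */
  #define DEMO_PAGE_SHARED \
          (DEMO_PRESENT | DEMO_RW | DEMO_USER | DEMO_ACCESSED | DEMO_NX)

  int main(void)
  {
          uint64_t pte = DEMO_PAGE_SHARED;

          assert(pte & DEMO_RW);          /* pte_write() style test  */
          assert(pte & DEMO_NX);          /* hence !pte_exec()       */
          pte |= DEMO_DIRTY;              /* pte_mkdirty() style set */
          assert(pte & DEMO_DIRTY);
          return 0;
  }
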
*/ -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } -static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } -static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; } -static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } - -static inline int pmd_large(pmd_t pte) { - return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == - (_PAGE_PSE|_PAGE_PRESENT); +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_DIRTY; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_RW; +} + +static inline int pte_file(pte_t pte) +{ + return pte_val(pte) & _PAGE_FILE; +} + +static inline int pte_huge(pte_t pte) +{ + return pte_val(pte) & _PAGE_PSE; } -static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); } -static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); } -static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); } -static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); } -static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); } -static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); } -static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } -static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); } -static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); } -static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); } -static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } +static inline int pte_global(pte_t pte) +{ + return pte_val(pte) & _PAGE_GLOBAL; +} + +static inline int pte_exec(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_NX); +} + +static inline int pmd_large(pmd_t pte) +{ + return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == + (_PAGE_PSE | _PAGE_PRESENT); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_PSE); +} + +static inline pte_t pte_clrhuge(pte_t pte) +{ + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); +} + +static inline pte_t pte_mkglobal(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_GLOBAL); +} + +static inline pte_t pte_clrglobal(pte_t pte) +{ + return __pte(pte_val(pte) & 
~(pteval_t)_PAGE_GLOBAL); +} extern pteval_t __supported_pte_mask; @@ -342,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, }) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { pte_t pte = native_ptep_get_and_clear(ptep); pte_update(mm, addr, ptep); @@ -350,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + int full) { pte_t pte; if (full) { @@ -366,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long } #define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); pte_update(mm, addr, ptep); -- cgit v1.2.1 From 2c5d516ca70641e26463e8d24344b515a2973c11 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:13 -0700 Subject: include/asm-x86/posix_types_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/posix_types_32.h | 47 +++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h index 015e539cdef5..b031efda37ec 100644 --- a/include/asm-x86/posix_types_32.h +++ b/include/asm-x86/posix_types_32.h @@ -45,32 +45,39 @@ typedef struct { #if defined(__KERNEL__) #undef __FD_SET -#define __FD_SET(fd,fdsetp) \ - __asm__ __volatile__("btsl %1,%0": \ - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) +#define __FD_SET(fd,fdsetp) \ + asm volatile("btsl %1,%0": \ + "+m" (*(__kernel_fd_set *)(fdsetp)) \ + : "r" ((int)(fd))) #undef __FD_CLR -#define __FD_CLR(fd,fdsetp) \ - __asm__ __volatile__("btrl %1,%0": \ - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) +#define __FD_CLR(fd,fdsetp) \ + asm volatile("btrl %1,%0": \ + "+m" (*(__kernel_fd_set *)(fdsetp)) \ + : "r" ((int) (fd))) #undef __FD_ISSET -#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ - unsigned char __result; \ - __asm__ __volatile__("btl %1,%2 ; setb %0" \ - :"=q" (__result) :"r" ((int) (fd)), \ - "m" (*(__kernel_fd_set *) (fdsetp))); \ - __result; })) +#define __FD_ISSET(fd,fdsetp) \ + (__extension__ \ + ({ \ + unsigned char __result; \ + asm volatile("btl %1,%2 ; setb %0" \ + : "=q" (__result) \ + : "r" ((int)(fd)), \ + "m" (*(__kernel_fd_set *)(fdsetp))); \ + __result; \ +})) #undef __FD_ZERO -#define __FD_ZERO(fdsetp) \ -do { \ - int __d0, __d1; \ - __asm__ __volatile__("cld ; rep ; stosl" \ - :"=m" (*(__kernel_fd_set *) (fdsetp)), \ - "=&c" (__d0), "=&D" (__d1) \ - :"a" (0), "1" (__FDSET_LONGS), \ - "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ +#define __FD_ZERO(fdsetp) \ +do { \ + int __d0, __d1; \ + asm volatile("cld ; rep ; stosl" \ + : "=m" (*(__kernel_fd_set *)(fdsetp)), \ + "=&c" (__d0), "=&D" (__d1) \ + : "a" (0), "1" (__FDSET_LONGS), \ + "2" ((__kernel_fd_set *)(fdsetp)) \ + : "memory"); \ } while (0) #endif /* defined(__KERNEL__) */ -- cgit 
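
The btsl/btrl/btl asm in the __FD_* helpers above is ordinary bit-array
arithmetic: descriptor fd lives in word fd / __NFDBITS, at bit
fd % __NFDBITS. A portable C rendering of the three single-bit helpers
(equivalent in effect to the asm, not byte-for-byte):

  #include <assert.h>

  #define DEMO_NFDBITS     (8 * (int)sizeof(unsigned long))
  #define DEMO_FDSET_LONGS (1024 / DEMO_NFDBITS)

  struct demo_fd_set { unsigned long fds_bits[DEMO_FDSET_LONGS]; };

  static void demo_fd_set_bit(int fd, struct demo_fd_set *p)
  {
          p->fds_bits[fd / DEMO_NFDBITS] |= 1UL << (fd % DEMO_NFDBITS);
  }

  static void demo_fd_clr_bit(int fd, struct demo_fd_set *p)
  {
          p->fds_bits[fd / DEMO_NFDBITS] &= ~(1UL << (fd % DEMO_NFDBITS));
  }

  static int demo_fd_isset(int fd, const struct demo_fd_set *p)
  {
          return (p->fds_bits[fd / DEMO_NFDBITS] >> (fd % DEMO_NFDBITS)) & 1;
  }

  int main(void)
  {
          struct demo_fd_set set = { { 0 } };

          demo_fd_set_bit(100, &set);
          assert(demo_fd_isset(100, &set));
          demo_fd_clr_bit(100, &set);
          assert(!demo_fd_isset(100, &set));
          return 0;
  }

The sigset helpers in the signal.h patch further down do the same kind
of word/bit split, just over _NSIG_BPW-sized words.
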
v1.2.1 From 4943aa4ec25ccc7161f4f4fcdd0018a4c1f6d4e8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:14 -0700 Subject: include/asm-x86/posix_types_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/posix_types_64.h | 54 ++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h index 9926aa43775b..d6624c95854a 100644 --- a/include/asm-x86/posix_types_64.h +++ b/include/asm-x86/posix_types_64.h @@ -46,7 +46,7 @@ typedef unsigned long __kernel_old_dev_t; #ifdef __KERNEL__ #undef __FD_SET -static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) +static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; @@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) } #undef __FD_CLR -static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) +static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; @@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) } #undef __FD_ISSET -static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) +static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; @@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) * for 256 and 1024-bit fd_sets respectively) */ #undef __FD_ZERO -static __inline__ void __FD_ZERO(__kernel_fd_set *p) +static inline void __FD_ZERO(__kernel_fd_set *p) { unsigned long *tmp = p->fds_bits; int i; if (__builtin_constant_p(__FDSET_LONGS)) { switch (__FDSET_LONGS) { - case 32: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; - tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; - tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; - tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; - tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; - tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; - tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; - return; - case 16: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; - tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; - tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; - return; - case 8: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; - return; - case 4: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - return; + case 32: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; + tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; + tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; + tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; + tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; + tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; + return; + case 16: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; + tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 
0; + return; + case 8: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + return; + case 4: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + return; } } i = __FDSET_LONGS; -- cgit v1.2.1 From cca2e6f87e3856953503aae2c0b8a1d5628796ef Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:15 -0700 Subject: include/asm-x86/processor.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/processor.h | 73 +++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 35 deletions(-) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index b0dece41dbc4..6e26c7c717a2 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -175,12 +175,12 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { /* ecx is often an input as well as an output. */ - __asm__("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); + asm("cpuid" + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "0" (*eax), "2" (*ecx)); } static inline void load_cr3(pgd_t *pgdir) @@ -427,17 +427,23 @@ static inline unsigned long native_get_debugreg(int regno) switch (regno) { case 0: - asm("mov %%db0, %0" :"=r" (val)); break; + asm("mov %%db0, %0" :"=r" (val)); + break; case 1: - asm("mov %%db1, %0" :"=r" (val)); break; + asm("mov %%db1, %0" :"=r" (val)); + break; case 2: - asm("mov %%db2, %0" :"=r" (val)); break; + asm("mov %%db2, %0" :"=r" (val)); + break; case 3: - asm("mov %%db3, %0" :"=r" (val)); break; + asm("mov %%db3, %0" :"=r" (val)); + break; case 6: - asm("mov %%db6, %0" :"=r" (val)); break; + asm("mov %%db6, %0" :"=r" (val)); + break; case 7: - asm("mov %%db7, %0" :"=r" (val)); break; + asm("mov %%db7, %0" :"=r" (val)); + break; default: BUG(); } @@ -478,14 +484,14 @@ static inline void native_set_iopl_mask(unsigned mask) #ifdef CONFIG_X86_32 unsigned int reg; - __asm__ __volatile__ ("pushfl;" - "popl %0;" - "andl %1, %0;" - "orl %2, %0;" - "pushl %0;" - "popfl" - : "=&r" (reg) - : "i" (~X86_EFLAGS_IOPL), "r" (mask)); + asm volatile ("pushfl;" + "popl %0;" + "andl %1, %0;" + "orl %2, %0;" + "pushl %0;" + "popfl" + : "=&r" (reg) + : "i" (~X86_EFLAGS_IOPL), "r" (mask)); #endif } @@ -523,8 +529,8 @@ static inline void native_swapgs(void) #define set_debugreg(value, register) \ native_set_debugreg(register, value) -static inline void -load_sp0(struct tss_struct *tss, struct thread_struct *thread) +static inline void load_sp0(struct tss_struct *tss, + struct thread_struct *thread) { native_load_sp0(tss, thread); } @@ -680,7 +686,7 @@ static inline unsigned int cpuid_edx(unsigned int op) /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
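
As the comment above notes, PAUSE ("rep; nop") belongs in busy-wait
loops: it hints to the core that this is a spin, which saves power on
SMT siblings and avoids a memory-order mis-speculation penalty when the
loop finally exits. A minimal user-space spin-wait built on the same
instruction (GCC-style inline asm assumed, x86 only):

  #include <stdatomic.h>

  static inline void demo_cpu_relax(void)
  {
          asm volatile("rep; nop" ::: "memory");  /* PAUSE */
  }

  /* Spin until another thread publishes *flag. */
  static void demo_spin_wait(atomic_int *flag)
  {
          while (!atomic_load_explicit(flag, memory_order_acquire))
                  demo_cpu_relax();
  }

  int main(void)
  {
          atomic_int flag;

          atomic_init(&flag, 1);  /* already set: the wait returns at once */
          demo_spin_wait(&flag);
          return 0;
  }
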
*/ static inline void rep_nop(void) { - __asm__ __volatile__("rep; nop" ::: "memory"); + asm volatile("rep; nop" ::: "memory"); } static inline void cpu_relax(void) @@ -694,32 +700,29 @@ static inline void sync_core(void) int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) - : "ebx", "ecx", "edx", "memory"); + : "ebx", "ecx", "edx", "memory"); } -static inline void -__monitor(const void *eax, unsigned long ecx, unsigned long edx) +static inline void __monitor(const void *eax, unsigned long ecx, + unsigned long edx) { /* "monitor %eax, %ecx, %edx;" */ - asm volatile( - ".byte 0x0f, 0x01, 0xc8;" - :: "a" (eax), "c" (ecx), "d"(edx)); + asm volatile(".byte 0x0f, 0x01, 0xc8;" + :: "a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax, %ecx;" */ - asm volatile( - ".byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax, %ecx;" */ - asm volatile( - "sti; .byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); } extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); -- cgit v1.2.1 From 708c5662975518eeea04de10cb11075d48636180 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:16 -0700 Subject: include/asm-x86/proto.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/proto.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 68563c0709ac..9da46af3b0e9 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h @@ -26,7 +26,7 @@ extern int reboot_force; long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); -#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) -#define round_down(x,y) ((x) & ~((y)-1)) +#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) +#define round_down(x, y) ((x) & ~((y) - 1)) #endif -- cgit v1.2.1 From 72f74fa25a468f40781e452f1d2528395090d55f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:17 -0700 Subject: include/asm-x86/ptrace.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/ptrace.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index bc442461ac64..e779f2b26b32 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -140,7 +140,8 @@ extern unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); #ifdef CONFIG_X86_32 -extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); +extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, + int error_code); #else void signal_fault(struct pt_regs *regs, void __user *frame, char *where); #endif @@ -169,8 +170,8 @@ static inline int user_mode(struct pt_regs *regs) static inline int user_mode_vm(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return ((regs->cs & SEGMENT_RPL_MASK) | - (regs->flags & VM_MASK)) >= USER_RPL; + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & VM_MASK)) >= + USER_RPL; #else return user_mode(regs); #endif -- cgit v1.2.1 From 78db4c6be439cd446c94181c73f5e06a89a5aaf3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:18 -0700 Subject: 
include/asm-x86/reboot.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/reboot.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index e9e3ffc22c07..6b5233b4f84b 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h @@ -3,8 +3,7 @@ struct pt_regs; -struct machine_ops -{ +struct machine_ops { void (*restart)(char *cmd); void (*halt)(void); void (*power_off)(void); -- cgit v1.2.1 From c6fd5d49ec578e1078331b81ca09008fb361a8ba Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:19 -0700 Subject: include/asm-x86/resume-trace.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/resume-trace.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 46f725b0bc82..2557514d7ef6 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h @@ -3,16 +3,17 @@ #include -#define TRACE_RESUME(user) do { \ +#define TRACE_RESUME(user) \ +do { \ if (pm_trace_enabled) { \ void *tracedata; \ asm volatile(_ASM_MOV_UL " $1f,%0\n" \ - ".section .tracedata,\"a\"\n" \ - "1:\t.word %c1\n\t" \ - _ASM_PTR " %c2\n" \ - ".previous" \ - :"=r" (tracedata) \ - : "i" (__LINE__), "i" (__FILE__)); \ + ".section .tracedata,\"a\"\n" \ + "1:\t.word %c1\n\t" \ + _ASM_PTR " %c2\n" \ + ".previous" \ + :"=r" (tracedata) \ + : "i" (__LINE__), "i" (__FILE__)); \ generate_resume_trace(tracedata, user); \ } \ } while (0) -- cgit v1.2.1 From 0f4fc8c1dca86b519fed50be0962c8def8d3d446 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:20 -0700 Subject: include/asm-x86/rio.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/rio.h | 76 +++++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index 97cdcc9887ba..3451c576e6af 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h @@ -11,53 +11,53 @@ #define RIO_TABLE_VERSION 3 struct rio_table_hdr { - u8 version; /* Version number of this data structure */ - u8 num_scal_dev; /* # of Scalability devices */ - u8 num_rio_dev; /* # of RIO I/O devices */ + u8 version; /* Version number of this data structure */ + u8 num_scal_dev; /* # of Scalability devices */ + u8 num_rio_dev; /* # of RIO I/O devices */ } __attribute__((packed)); struct scal_detail { - u8 node_id; /* Scalability Node ID */ - u32 CBAR; /* Address of 1MB register space */ - u8 port0node; /* Node ID port connected to: 0xFF=None */ - u8 port0port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port1node; /* Node ID port connected to: 0xFF = None */ - u8 port1port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port2node; /* Node ID port connected to: 0xFF = None */ - u8 port2port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ + u8 node_id; /* Scalability Node ID */ + u32 CBAR; /* Address of 1MB register space */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 0xFF = None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ 
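
Back to the round_up()/round_down() pair in the proto.h hunk above:
they are only valid when y is a power of two, since ~((y) - 1) is then
a contiguous low-bit mask. A quick self-check with the same macro
bodies in user space:

  #include <assert.h>

  #define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))
  #define round_down(x, y) ((x) & ~((y) - 1))

  int main(void)
  {
          assert(round_up(4097, 4096) == 8192);   /* crosses a boundary */
          assert(round_up(4096, 4096) == 4096);   /* aligned: unchanged */
          assert(round_down(8191, 4096) == 4096);
          /* NB: a non-power-of-two y silently misrounds with this trick. */
          return 0;
  }
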
+ u8 port2node; /* Node ID port connected to: 0xFF = None */ + u8 port2port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ } __attribute__((packed)); struct rio_detail { - u8 node_id; /* RIO Node ID */ - u32 BBAR; /* Address of 1MB register space */ - u8 type; /* Type of device */ - u8 owner_id; /* Node ID of Hurricane that owns this */ - /* node */ - u8 port0node; /* Node ID port connected to: 0xFF=None */ - u8 port0port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 port1node; /* Node ID port connected to: 0xFF=None */ - u8 port1port; /* Port num port connected to: 0,1,2, or */ - /* 0xFF=None */ - u8 first_slot; /* Lowest slot number below this Calgary */ - u8 status; /* Bit 0 = 1 : the XAPIC is used */ - /* = 0 : the XAPIC is not used, ie: */ - /* ints fwded to another XAPIC */ - /* Bits1:7 Reserved */ - u8 WP_index; /* instance index - lower ones have */ - /* lower slot numbers/PCI bus numbers */ - u8 chassis_num; /* 1 based Chassis number */ + u8 node_id; /* RIO Node ID */ + u32 BBAR; /* Address of 1MB register space */ + u8 type; /* Type of device */ + u8 owner_id; /* Node ID of Hurricane that owns this */ + /* node */ + u8 port0node; /* Node ID port connected to: 0xFF=None */ + u8 port0port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 port1node; /* Node ID port connected to: 0xFF=None */ + u8 port1port; /* Port num port connected to: 0,1,2, or */ + /* 0xFF=None */ + u8 first_slot; /* Lowest slot number below this Calgary */ + u8 status; /* Bit 0 = 1 : the XAPIC is used */ + /* = 0 : the XAPIC is not used, ie: */ + /* ints fwded to another XAPIC */ + /* Bits1:7 Reserved */ + u8 WP_index; /* instance index - lower ones have */ + /* lower slot numbers/PCI bus numbers */ + u8 chassis_num; /* 1 based Chassis number */ } __attribute__((packed)); enum { - HURR_SCALABILTY = 0, /* Hurricane Scalability info */ - HURR_RIOIB = 2, /* Hurricane RIOIB info */ - COMPAT_CALGARY = 4, /* Compatibility Calgary */ - ALT_CALGARY = 5, /* Second Planar Calgary */ + HURR_SCALABILTY = 0, /* Hurricane Scalability info */ + HURR_RIOIB = 2, /* Hurricane RIOIB info */ + COMPAT_CALGARY = 4, /* Compatibility Calgary */ + ALT_CALGARY = 5, /* Second Planar Calgary */ }; /* -- cgit v1.2.1 From 6e5609a97acef44440f233ad435dd0ab563608f9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:21 -0700 Subject: include/asm-x86/rwsem.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/rwsem.h | 169 ++++++++++++++++++++++++------------------------ 1 file changed, 86 insertions(+), 83 deletions(-) diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 520a379f4b80..750f2a3542b3 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h @@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore * /* * the semaphore definition */ -struct rw_semaphore { - signed long count; + #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +struct rw_semaphore { + signed long count; spinlock_t wait_lock; struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -78,11 +80,13 @@ struct rw_semaphore { #endif -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, 
__SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } +#define __RWSEM_INITIALIZER(name) \ +{ \ + RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ +} -#define DECLARE_RWSEM(name) \ +#define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) extern void __init_rwsem(struct rw_semaphore *sem, const char *name, @@ -100,16 +104,16 @@ do { \ */ static inline void __down_read(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " jns 1f\n" - " call call_rwsem_down_read_failed\n" - "1:\n\t" - "# ending down_read\n\t" - : "+m" (sem->count) - : "a" (sem) - : "memory", "cc"); + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX " incl (%%eax)\n\t" + /* adds 0x00000001, returns the old value */ + " jns 1f\n" + " call call_rwsem_down_read_failed\n" + "1:\n\t" + "# ending down_read\n\t" + : "+m" (sem->count) + : "a" (sem) + : "memory", "cc"); } /* @@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value static inline int __down_read_trylock(struct rw_semaphore *sem) { __s32 result, tmp; - __asm__ __volatile__( - "# beginning __down_read_trylock\n\t" - " movl %0,%1\n\t" - "1:\n\t" - " movl %1,%2\n\t" - " addl %3,%2\n\t" - " jle 2f\n\t" -LOCK_PREFIX " cmpxchgl %2,%0\n\t" - " jnz 1b\n\t" - "2:\n\t" - "# ending __down_read_trylock\n\t" - : "+m" (sem->count), "=&a" (result), "=&r" (tmp) - : "i" (RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); - return result>=0 ? 1 : 0; + asm volatile("# beginning __down_read_trylock\n\t" + " movl %0,%1\n\t" + "1:\n\t" + " movl %1,%2\n\t" + " addl %3,%2\n\t" + " jle 2f\n\t" + LOCK_PREFIX " cmpxchgl %2,%0\n\t" + " jnz 1b\n\t" + "2:\n\t" + "# ending __down_read_trylock\n\t" + : "+m" (sem->count), "=&a" (result), "=&r" (tmp) + : "i" (RWSEM_ACTIVE_READ_BIAS) + : "memory", "cc"); + return result >= 0 ? 1 : 0; } /* @@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) int tmp; tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ - " testl %%edx,%%edx\n\t" /* was the count 0 before? */ - " jz 1f\n" - " call call_rwsem_down_write_failed\n" - "1:\n" - "# ending down_write" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" + /* subtract 0x0000ffff, returns the old value */ + " testl %%edx,%%edx\n\t" + /* was the count 0 before? 
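
The bias constants above are what let a single atomic xadd answer the
rwsem fast-path question: the low 16 bits of count hold the number of
active lockers, and a writer (or any waiter) drives the upper bits
negative. A sketch of the arithmetic only (the atomicity is the whole
point of the real asm, so this is illustration, not an implementation):

  #include <assert.h>
  #include <stdint.h>

  #define ACTIVE_BIAS       0x00000001
  #define WAITING_BIAS      (-0x00010000)
  #define ACTIVE_READ_BIAS  ACTIVE_BIAS
  #define ACTIVE_WRITE_BIAS (WAITING_BIAS + ACTIVE_BIAS)

  int main(void)
  {
          int32_t count = 0;                      /* unlocked */

          count += ACTIVE_READ_BIAS;              /* first reader  */
          count += ACTIVE_READ_BIAS;              /* second reader */
          assert(count >= 0);                     /* 'jns': fast path ok */

          count -= 2 * ACTIVE_READ_BIAS;          /* both readers leave */
          assert(count == 0);                     /* writer's 'jz' check
                                                     sees old count 0 */
          count += ACTIVE_WRITE_BIAS;             /* writer takes it */
          assert(count < 0);                      /* readers now fall to
                                                     the slow path */
          return 0;
  }
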
*/ + " jz 1f\n" + " call call_rwsem_down_write_failed\n" + "1:\n" + "# ending down_write" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) + : "memory", "cc"); } static inline void __down_write(struct rw_semaphore *sem) @@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem) static inline int __down_write_trylock(struct rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, - RWSEM_UNLOCKED_VALUE, + RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); if (ret == RWSEM_UNLOCKED_VALUE) return 1; @@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) static inline void __up_read(struct rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" - "1:\n" - "# ending __up_read\n" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" + /* subtracts 1, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" + "1:\n" + "# ending __up_read\n" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) + : "memory", "cc"); } /* @@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu */ static inline void __up_write(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning __up_write\n\t" - " movl %2,%%edx\n\t" -LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jz 1f\n" - " call call_rwsem_wake\n" - "1:\n\t" - "# ending __up_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc", "edx"); + asm volatile("# beginning __up_write\n\t" + " movl %2,%%edx\n\t" + LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" + /* tries to transition + 0xffff0001 -> 0x00000000 */ + " jz 1f\n" + " call call_rwsem_wake\n" + "1:\n\t" + "# ending __up_write\n" + : "+m" (sem->count) + : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) + : "memory", "cc", "edx"); } /* @@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> */ static inline void __downgrade_write(struct rw_semaphore *sem) { - __asm__ __volatile__( - "# beginning __downgrade_write\n\t" -LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ - " jns 1f\n\t" - " call call_rwsem_downgrade_wake\n" - "1:\n\t" - "# ending __downgrade_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_WAITING_BIAS) - : "memory", "cc"); + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX " addl %2,(%%eax)\n\t" + /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ + " jns 1f\n\t" + " call call_rwsem_downgrade_wake\n" + "1:\n\t" + "# ending __downgrade_write\n" + : "+m" (sem->count) + : "a" (sem), "i" (-RWSEM_WAITING_BIAS) + : "memory", "cc"); } /* @@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) { - __asm__ __volatile__( -LOCK_PREFIX "addl %1,%0" - : "+m" (sem->count) - : "ir" (delta)); + asm volatile(LOCK_PREFIX "addl %1,%0" + : "+m" (sem->count) + : "ir" (delta)); } /* @@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) { int tmp = delta; - __asm__ __volatile__( -LOCK_PREFIX "xadd %0,%1" - : "+r" (tmp), "+m" (sem->count) - : : "memory"); + asm volatile(LOCK_PREFIX "xadd %0,%1" + 
: "+r" (tmp), "+m" (sem->count) + : : "memory"); - return tmp+delta; + return tmp + delta; } static inline int rwsem_is_locked(struct rw_semaphore *sem) -- cgit v1.2.1 From 915cd5aa0adbe0f62b7a56d6eaf6908b47f80395 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:25 -0700 Subject: include/asm-x86/setup.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/setup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index f745de211191..fa6763af8d26 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h @@ -55,8 +55,8 @@ struct e820entry; char * __init machine_specific_memory_setup(void); char *memory_setup(void); -int __init copy_e820_map(struct e820entry * biosmap, int nr_map); -int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); +int __init copy_e820_map(struct e820entry *biosmap, int nr_map); +int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map); void __init add_memory_region(unsigned long long start, unsigned long long size, int type); -- cgit v1.2.1 From 895b7643d6d2d04761344e18a2473255f2551e9e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:26 -0700 Subject: include/asm-x86/sigcontext32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sigcontext32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 6ffab4fd593a..57a9686fb491 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h @@ -26,7 +26,7 @@ struct _fpstate_ia32 { __u32 cw; __u32 sw; __u32 tag; /* not compatible to 64bit twd */ - __u32 ipoff; + __u32 ipoff; __u32 cssel; __u32 dataoff; __u32 datasel; @@ -39,7 +39,7 @@ struct _fpstate_ia32 { __u32 mxcsr; __u32 reserved; struct _fpxreg _fxsr_st[8]; - struct _xmmreg _xmm[8]; /* It's actually 16 */ + struct _xmmreg _xmm[8]; /* It's actually 16 */ __u32 padding[56]; }; -- cgit v1.2.1 From af1fec15de17086864fc3917e21a31e303ec0e91 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:27 -0700 Subject: include/asm-x86/sigcontext.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sigcontext.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index d743947f4c77..2f9c884d2c0f 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h @@ -79,7 +79,7 @@ struct sigcontext { unsigned long flags; unsigned long sp_at_signal; unsigned short ss, __ssh; - struct _fpstate __user * fpstate; + struct _fpstate __user *fpstate; unsigned long oldmask; unsigned long cr2; }; @@ -107,7 +107,7 @@ struct sigcontext { unsigned long eflags; unsigned long esp_at_signal; unsigned short ss, __ssh; - struct _fpstate __user * fpstate; + struct _fpstate __user *fpstate; unsigned long oldmask; unsigned long cr2; }; @@ -121,7 +121,8 @@ struct sigcontext { struct _fpstate { __u16 cwd; __u16 swd; - __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ + __u16 twd; /* Note this is not the same as the + 32bit/x87/FSAVE twd */ __u16 fop; __u64 rip; __u64 rdp; -- cgit v1.2.1 From 9551b12a51bab7058ad486ba96fd0c27fdafe922 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:28 -0700 Subject: 
include/asm-x86/signal.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/signal.h | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index aee7eca585ab..f15186d39c69 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h @@ -185,61 +185,61 @@ typedef struct sigaltstack { #define __HAVE_ARCH_SIG_BITOPS -#define sigaddset(set,sig) \ - (__builtin_constantp(sig) ? \ - __const_sigaddset((set),(sig)) : \ - __gen_sigaddset((set),(sig))) +#define sigaddset(set,sig) \ + (__builtin_constantp(sig) \ + ? __const_sigaddset((set), (sig)) \ + : __gen_sigaddset((set), (sig))) -static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) +static inline void __gen_sigaddset(sigset_t *set, int _sig) { - __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); + asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } -static __inline__ void __const_sigaddset(sigset_t *set, int _sig) +static inline void __const_sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); } -#define sigdelset(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigdelset((set),(sig)) : \ - __gen_sigdelset((set),(sig))) +#define sigdelset(set, sig) \ + (__builtin_constant_p(sig) \ + ? __const_sigdelset((set), (sig)) \ + : __gen_sigdelset((set), (sig))) -static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) +static inline void __gen_sigdelset(sigset_t *set, int _sig) { - __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); + asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } -static __inline__ void __const_sigdelset(sigset_t *set, int _sig) +static inline void __const_sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); } -static __inline__ int __const_sigismember(sigset_t *set, int _sig) +static inline int __const_sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } -static __inline__ int __gen_sigismember(sigset_t *set, int _sig) +static inline int __gen_sigismember(sigset_t *set, int _sig) { int ret; - __asm__("btl %2,%1\n\tsbbl %0,%0" - : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); + asm("btl %2,%1\n\tsbbl %0,%0" + : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); return ret; } -#define sigismember(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigismember((set),(sig)) : \ - __gen_sigismember((set),(sig))) +#define sigismember(set, sig) \ + (__builtin_constant_p(sig) \ + ? 
__const_sigismember((set), (sig)) \ + : __gen_sigismember((set), (sig))) -static __inline__ int sigfindinword(unsigned long word) +static inline int sigfindinword(unsigned long word) { - __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); + asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); return word; } -- cgit v1.2.1 From 2fec394adf9445ae6a21c5e67d9ad3f6f5b7c8d1 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:29 -0700 Subject: include/asm-x86/smp_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/smp_32.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index f861d0415171..cb3ada2fedbf 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -18,8 +18,8 @@ extern cpumask_t cpu_callin_map; -extern void (*mtrr_hook) (void); -extern void zap_low_mappings (void); +extern void (*mtrr_hook)(void); +extern void zap_low_mappings(void); #ifdef CONFIG_SMP /* @@ -44,7 +44,7 @@ static inline int num_booting_cpus(void) #ifdef CONFIG_X86_LOCAL_APIC -static __inline int logical_smp_processor_id(void) +static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -- cgit v1.2.1 From ceb7ce1052a9087bd4752424f253b883ec5e1cec Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:30 -0700 Subject: include/asm-x86/smp_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index fd709cbba4d1..c53a011bb91c 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -24,9 +24,9 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), #define raw_smp_processor_id() read_pda(cpunumber) #define stack_smp_processor_id() \ - ({ \ +({ \ struct thread_info *ti; \ - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ + asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->cpu; \ }) @@ -46,7 +46,7 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() smp_processor_id() -static __inline int logical_smp_processor_id(void) +static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -- cgit v1.2.1 From d3bf60a6e48c9a451cac345c0ad57552bb299992 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:31 -0700 Subject: include/asm-x86/spinlock.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/spinlock.h | 105 +++++++++++++++++++++------------------------ 1 file changed, 50 insertions(+), 55 deletions(-) diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 23804c1890ff..47dfe2607bb1 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) { short inc = 0x0100; - __asm__ __volatile__ ( + asm volatile ( LOCK_PREFIX "xaddw %w0, %1\n" "1:\t" "cmpb %h0, %b0\n\t" @@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) /* don't need lfence here, because loads are in-order */ "jmp 1b\n" "2:" - :"+Q" (inc), "+m" (lock->slock) + : 
"+Q" (inc), "+m" (lock->slock) : - :"memory", "cc"); + : "memory", "cc"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) int tmp; short new; - asm volatile( - "movw %2,%w0\n\t" - "cmpb %h0,%b0\n\t" - "jne 1f\n\t" - "movw %w0,%w1\n\t" - "incb %h1\n\t" - "lock ; cmpxchgw %w1,%2\n\t" - "1:" - "sete %b1\n\t" - "movzbl %b1,%0\n\t" - :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) - : - : "memory", "cc"); + asm volatile("movw %2,%w0\n\t" + "cmpb %h0,%b0\n\t" + "jne 1f\n\t" + "movw %w0,%w1\n\t" + "incb %h1\n\t" + "lock ; cmpxchgw %w1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + : "=&a" (tmp), "=Q" (new), "+m" (lock->slock) + : + : "memory", "cc"); return tmp; } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - UNLOCK_LOCK_PREFIX "incb %0" - :"+m" (lock->slock) - : - :"memory", "cc"); + asm volatile(UNLOCK_LOCK_PREFIX "incb %0" + : "+m" (lock->slock) + : + : "memory", "cc"); } #else static inline int __raw_spin_is_locked(raw_spinlock_t *lock) @@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) int inc = 0x00010000; int tmp; - __asm__ __volatile__ ( - "lock ; xaddl %0, %1\n" - "movzwl %w0, %2\n\t" - "shrl $16, %0\n\t" - "1:\t" - "cmpl %0, %2\n\t" - "je 2f\n\t" - "rep ; nop\n\t" - "movzwl %1, %2\n\t" - /* don't need lfence here, because loads are in-order */ - "jmp 1b\n" - "2:" - :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) - : - :"memory", "cc"); + asm volatile("lock ; xaddl %0, %1\n" + "movzwl %w0, %2\n\t" + "shrl $16, %0\n\t" + "1:\t" + "cmpl %0, %2\n\t" + "je 2f\n\t" + "rep ; nop\n\t" + "movzwl %1, %2\n\t" + /* don't need lfence here, because loads are in-order */ + "jmp 1b\n" + "2:" + : "+Q" (inc), "+m" (lock->slock), "=r" (tmp) + : + : "memory", "cc"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) int tmp; int new; - asm volatile( - "movl %2,%0\n\t" - "movl %0,%1\n\t" - "roll $16, %0\n\t" - "cmpl %0,%1\n\t" - "jne 1f\n\t" - "addl $0x00010000, %1\n\t" - "lock ; cmpxchgl %1,%2\n\t" - "1:" - "sete %b1\n\t" - "movzbl %b1,%0\n\t" - :"=&a" (tmp), "=r" (new), "+m" (lock->slock) - : - : "memory", "cc"); + asm volatile("movl %2,%0\n\t" + "movl %0,%1\n\t" + "roll $16, %0\n\t" + "cmpl %0,%1\n\t" + "jne 1f\n\t" + "addl $0x00010000, %1\n\t" + "lock ; cmpxchgl %1,%2\n\t" + "1:" + "sete %b1\n\t" + "movzbl %b1,%0\n\t" + : "=&a" (tmp), "=r" (new), "+m" (lock->slock) + : + : "memory", "cc"); return tmp; } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - UNLOCK_LOCK_PREFIX "incw %0" - :"+m" (lock->slock) - : - :"memory", "cc"); + asm volatile(UNLOCK_LOCK_PREFIX "incw %0" + : "+m" (lock->slock) + : + : "memory", "cc"); } #endif -- cgit v1.2.1 From 7f3a9508b5778034091e00e696dc17ca98b8a84c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:32 -0700 Subject: include/asm-x86/srat.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/srat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h index 165ab4bdc02b..f4bba131d068 100644 --- a/include/asm-x86/srat.h +++ b/include/asm-x86/srat.h @@ -1,5 +1,5 @@ /* - * Some of the code in this file has been gleaned from the 64 bit + * Some of the code in this file has been gleaned from the 64 bit * discontigmem support code base. 
* * Copyright (C) 2002, IBM Corp. -- cgit v1.2.1 From 06b0f574eafdd26d325ce5ab3c38522d0cdd7b7c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:33 -0700 Subject: include/asm-x86/string_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/string_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index c5d13a86dea7..b49369ad9a61 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -3,7 +3,7 @@ #ifdef __KERNEL__ -/* Let gcc decide wether to inline or use the out of line functions */ +/* Let gcc decide whether to inline or use the out of line functions */ #define __HAVE_ARCH_STRCPY extern char *strcpy(char *dest, const char *src); -- cgit v1.2.1 From 953b2f1ed6f2642e58e7c5b8c1385d132f1b5685 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:34 -0700 Subject: include/asm-x86/string_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/string_64.h | 66 ++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index e583da7918fb..52b5ab383395 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h @@ -3,26 +3,24 @@ #ifdef __KERNEL__ -/* Written 2002 by Andi Kleen */ +/* Written 2002 by Andi Kleen */ -/* Only used for special circumstances. Stolen from i386/string.h */ -static __always_inline void * -__inline_memcpy(void * to, const void * from, size_t n) +/* Only used for special circumstances. Stolen from i386/string.h */ +static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n) { -unsigned long d0, d1, d2; -__asm__ __volatile__( - "rep ; movsl\n\t" - "testb $2,%b4\n\t" - "je 1f\n\t" - "movsw\n" - "1:\ttestb $1,%b4\n\t" - "je 2f\n\t" - "movsb\n" - "2:" - : "=&c" (d0), "=&D" (d1), "=&S" (d2) - :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) - : "memory"); -return (to); + unsigned long d0, d1, d2; + asm volatile("rep ; movsl\n\t" + "testb $2,%b4\n\t" + "je 1f\n\t" + "movsw\n" + "1:\ttestb $1,%b4\n\t" + "je 2f\n\t" + "movsb\n" + "2:" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) + : "memory"); + return to; } /* Even with __builtin_ the compiler may decide to use the out of line @@ -32,28 +30,30 @@ return (to); #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 extern void *memcpy(void *to, const void *from, size_t len); #else -extern void *__memcpy(void *to, const void *from, size_t len); -#define memcpy(dst,src,len) \ - ({ size_t __len = (len); \ - void *__ret; \ - if (__builtin_constant_p(len) && __len >= 64) \ - __ret = __memcpy((dst),(src),__len); \ - else \ - __ret = __builtin_memcpy((dst),(src),__len); \ - __ret; }) +extern void *__memcpy(void *to, const void *from, size_t len); +#define memcpy(dst, src, len) \ +({ \ + size_t __len = (len); \ + void *__ret; \ + if (__builtin_constant_p(len) && __len >= 64) \ + __ret = __memcpy((dst), (src), __len); \ + else \ + __ret = __builtin_memcpy((dst), (src), __len); \ + __ret; \ +}) #endif #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); #define __HAVE_ARCH_MEMMOVE -void * memmove(void * dest,const void *src,size_t count); +void *memmove(void *dest, const void *src, size_t count); -int memcmp(const void * cs,const void * ct,size_t count); -size_t 
strlen(const char * s); -char *strcpy(char * dest,const char *src); -char *strcat(char * dest, const char * src); -int strcmp(const char * cs,const char * ct); +int memcmp(const void *cs, const void *ct, size_t count); +size_t strlen(const char *s); +char *strcpy(char *dest, const char *src); +char *strcat(char *dest, const char *src); +int strcmp(const char *cs, const char *ct); #endif /* __KERNEL__ */ -- cgit v1.2.1 From cf030ebd40e37cb11d1efa6677890f40f21e16f4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:35 -0700 Subject: include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/suspend_32.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 1bbda3ad7796..24e1c080aa8a 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h @@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; } /* image of the saved processor state */ struct saved_context { - u16 es, fs, gs, ss; + u16 es, fs, gs, ss; unsigned long cr0, cr2, cr3, cr4; struct desc_ptr gdt; struct desc_ptr idt; @@ -32,11 +32,11 @@ extern unsigned long saved_edi; static inline void acpi_save_register_state(unsigned long return_point) { saved_eip = return_point; - asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); - asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); - asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); + asm volatile("movl %%esp,%0" : "=m" (saved_esp)); + asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); + asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); + asm volatile("movl %%edi,%0" : "=m" (saved_edi)); + asm volatile("movl %%esi,%0" : "=m" (saved_esi)); } #define acpi_restore_register_state() do {} while (0) -- cgit v1.2.1 From 1b17fce6078ab0672f6702097680b65124de5f05 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:36 -0700 Subject: include/asm-x86/suspend_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/suspend_64.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index 2eb92cb81a0d..dc3262b43072 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h @@ -9,8 +9,7 @@ #include #include -static inline int -arch_prepare_suspend(void) +static inline int arch_prepare_suspend(void) { return 0; } @@ -25,7 +24,7 @@ arch_prepare_suspend(void) */ struct saved_context { struct pt_regs regs; - u16 ds, es, fs, gs, ss; + u16 ds, es, fs, gs, ss; unsigned long gs_base, gs_kernel_base, fs_base; unsigned long cr0, cr2, cr3, cr4, cr8; unsigned long efer; -- cgit v1.2.1 From a4c2d7d9285500a9b229bb7ddc7abe0212a0dab0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:37 -0700 Subject: include/asm-x86/swiotlb.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/swiotlb.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index f9c589539a82..f5d9e74b1e4a 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h @@ -8,15 +8,15 @@ extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int 
dir); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); + dma_addr_t *dma_handle, gfp_t flags); extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); + size_t size, int dir); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); + dma_addr_t dev_addr, + size_t size, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); + dma_addr_t dev_addr, + size_t size, int dir); extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, @@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev, unsigned long offset, size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); + struct scatterlist *sg, int nelems, + int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); + struct scatterlist *sg, int nelems, + int dir); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); + int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); -extern void swiotlb_free_coherent (struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); +extern void swiotlb_free_coherent(struct device *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); -- cgit v1.2.1 From 26b7fcc4bde28237a906597a809b149fb06713b0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:38 -0700 Subject: include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 56 +++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index bc249f40e0ee..f1078a5e4ed7 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -13,7 +13,7 @@ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ -#define ADDR (*(volatile long *) addr) +#define ADDR (*(volatile long *)addr) /** * sync_set_bit - Atomically set a bit in memory @@ -26,12 +26,12 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void sync_set_bit(int nr, volatile unsigned long * addr) +static inline void sync_set_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btsl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr) * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. 
*/ -static inline void sync_clear_bit(int nr, volatile unsigned long * addr) +static inline void sync_clear_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btrl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void sync_change_bit(int nr, volatile unsigned long * addr) +static inline void sync_change_bit(int nr, volatile unsigned long *addr) { - __asm__ __volatile__("lock; btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); + asm volatile("lock; btcl %1,%0" + : "+m" (ADDR) + : "Ir" (nr) + : "memory"); } /** @@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) +static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } @@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ -static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) +static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } @@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) * This operation is atomic and cannot be reordered. * It also implies a memory barrier. 
*/ -static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) +static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) { int oldbit; - __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); + asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0" + : "=r" (oldbit), "+m" (ADDR) + : "Ir" (nr) : "memory"); return oldbit; } -- cgit v1.2.1 From c5386c200f55940eeeb827df172edf2e0305f23b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:39 -0700 Subject: include/asm-x86/system.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/system.h | 104 +++++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 53 deletions(-) diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 33b0017156a7..a2f04cd79b29 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h @@ -38,35 +38,33 @@ do { \ */ \ unsigned long ebx, ecx, edx, esi, edi; \ \ - asm volatile( \ - "pushfl \n\t" /* save flags */ \ - "pushl %%ebp \n\t" /* save EBP */ \ - "movl %%esp,%[prev_sp] \n\t" /* save ESP */ \ - "movl %[next_sp],%%esp \n\t" /* restore ESP */ \ - "movl $1f,%[prev_ip] \n\t" /* save EIP */ \ - "pushl %[next_ip] \n\t" /* restore EIP */ \ - "jmp __switch_to \n" /* regparm call */ \ - "1: \t" \ - "popl %%ebp \n\t" /* restore EBP */ \ - "popfl \n" /* restore flags */ \ + asm volatile("pushfl\n\t" /* save flags */ \ + "pushl %%ebp\n\t" /* save EBP */ \ + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ + "pushl %[next_ip]\n\t" /* restore EIP */ \ + "jmp __switch_to\n" /* regparm call */ \ + "1:\t" \ + "popl %%ebp\n\t" /* restore EBP */ \ + "popfl\n" /* restore flags */ \ \ - /* output parameters */ \ - : [prev_sp] "=m" (prev->thread.sp), \ - [prev_ip] "=m" (prev->thread.ip), \ - "=a" (last), \ + /* output parameters */ \ + : [prev_sp] "=m" (prev->thread.sp), \ + [prev_ip] "=m" (prev->thread.ip), \ + "=a" (last), \ \ - /* clobbered output registers: */ \ - "=b" (ebx), "=c" (ecx), "=d" (edx), \ - "=S" (esi), "=D" (edi) \ - \ - /* input parameters: */ \ - : [next_sp] "m" (next->thread.sp), \ - [next_ip] "m" (next->thread.ip), \ - \ - /* regparm parameters for __switch_to(): */ \ - [prev] "a" (prev), \ - [next] "d" (next) \ - ); \ + /* clobbered output registers: */ \ + "=b" (ebx), "=c" (ecx), "=d" (edx), \ + "=S" (esi), "=D" (edi) \ + \ + /* input parameters: */ \ + : [next_sp] "m" (next->thread.sp), \ + [next_ip] "m" (next->thread.ip), \ + \ + /* regparm parameters for __switch_to(): */ \ + [prev] "a" (prev), \ + [next] "d" (next)); \ } while (0) /* @@ -146,35 +144,34 @@ extern void load_gs_index(unsigned); */ #define loadsegment(seg, value) \ asm volatile("\n" \ - "1:\t" \ - "movl %k0,%%" #seg "\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\t" \ - "movl %k1, %%" #seg "\n\t" \ - "jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : :"r" (value), "r" (0)) + "1:\t" \ + "movl %k0,%%" #seg "\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:\t" \ + "movl %k1, %%" #seg "\n\t" \ + "jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b,3b) \ + : :"r" (value), "r" (0)) /* * Save a segment register away */ -#define savesegment(seg, value) \ +#define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; - __asm__("lsll 
%1,%0" - :"=r" (__limit):"r" (segment)); - return __limit+1; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); + return __limit + 1; } static inline void native_clts(void) { - asm volatile ("clts"); + asm volatile("clts"); } /* @@ -189,43 +186,43 @@ static unsigned long __force_order; static inline unsigned long native_read_cr0(void) { unsigned long val; - asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr0(unsigned long val) { - asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr2(void) { unsigned long val; - asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr2(unsigned long val) { - asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr3(void) { unsigned long val; - asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } static inline void native_write_cr3(unsigned long val) { - asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); } static inline unsigned long native_read_cr4(void) { unsigned long val; - asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); return val; } @@ -237,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void) #ifdef CONFIG_X86_32 asm volatile("1: mov %%cr4, %0\n" "2:\n" - _ASM_EXTABLE(1b,2b) + _ASM_EXTABLE(1b, 2b) : "=r" (val), "=m" (__force_order) : "0" (0)); #else val = native_read_cr4(); @@ -247,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void) static inline void native_write_cr4(unsigned long val) { - asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); } #ifdef CONFIG_X86_64 @@ -268,6 +265,7 @@ static inline void native_wbinvd(void) { asm volatile("wbinvd": : :"memory"); } + #ifdef CONFIG_PARAVIRT #include #else @@ -300,7 +298,7 @@ static inline void clflush(volatile void *__p) asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); } -#define nop() __asm__ __volatile__ ("nop") +#define nop() asm volatile ("nop") void disable_hlt(void); void enable_hlt(void); @@ -399,7 +397,7 @@ void default_idle(void); # define smp_wmb() barrier() #endif #define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) #else #define smp_mb() barrier() #define smp_rmb() barrier() -- cgit v1.2.1 From 7c4d4784db93496791b798e0abab1d056b192ad0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:40 -0700 Subject: include/asm-x86/tce.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h index cd955d3d112f..b1a4ea00df78 100644 --- a/include/asm-x86/tce.h +++ b/include/asm-x86/tce.h 
@@ -39,7 +39,7 @@ struct iommu_table; #define TCE_RPN_MASK 0x0000fffffffff000ULL extern void tce_build(struct iommu_table *tbl, unsigned long index, - unsigned int npages, unsigned long uaddr, int direction); + unsigned int npages, unsigned long uaddr, int direction); extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); extern void * __init alloc_tce_table(void); extern void __init free_tce_table(void *tbl); -- cgit v1.2.1 From 89917f28f3377fa7e38a51e9208e83b7b92542ee Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:42 -0700 Subject: include/asm-x86/thread_info_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/thread_info_32.h | 88 +++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 42 deletions(-) diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 5bd508260ffb..4e053fa561a9 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h @@ -20,7 +20,8 @@ * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages - * - if the contents of this structure are changed, the assembly constants must also be changed + * - if the contents of this structure are changed, + * the assembly constants must also be changed */ #ifndef __ASSEMBLY__ @@ -30,18 +31,16 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - + int preempt_count; /* 0 => preemptable, + <0 => BUG */ mm_segment_t addr_limit; /* thread address space: - 0-0xBFFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread + 0-0xBFFFFFFF user-thread + 0-0xFFFFFFFF kernel-thread */ void *sysenter_return; struct restart_block restart_block; - - unsigned long previous_esp; /* ESP of the previous stack in case - of nested (IRQ) stacks + unsigned long previous_esp; /* ESP of the previous stack in + case of nested (IRQ) stacks */ __u8 supervisor_stack[0]; }; @@ -90,15 +89,16 @@ register unsigned long current_stack_pointer asm("esp") __used; /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { - return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); + return (struct thread_info *) + (current_stack_pointer & ~(THREAD_SIZE - 1)); } /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info(tsk) ((struct thread_info *) \ - __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE))) +#define alloc_thread_info(tsk) ((struct thread_info *) \ + __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE))) #else -#define alloc_thread_info(tsk) ((struct thread_info *) \ +#define alloc_thread_info(tsk) ((struct thread_info *) \ __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) #endif @@ -107,7 +107,7 @@ static inline struct thread_info *current_thread_info(void) #else /* !__ASSEMBLY__ */ /* how to get the thread information struct from ASM */ -#define GET_THREAD_INFO(reg) \ +#define GET_THREAD_INFO(reg) \ movl $-THREAD_SIZE, reg; \ andl %esp, reg @@ -119,14 +119,16 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags - * - these are process state flags that various assembly files may need to access + * - these are process state flags 
that various + * assembly files may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ +#define TIF_SINGLESTEP 3 /* restore singlestep on return to + user mode */ #define TIF_IRET 4 /* return with iret */ #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ @@ -143,36 +145,36 @@ static inline struct thread_info *current_thread_info(void) #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ -#define _TIF_SYSCALL_TRACE (1<status & TS_POLLING) -- cgit v1.2.1 From b98fff30223799c5df444fef1ebcfcddf310f740 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:43 -0700 Subject: include/asm-x86/thread_info_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/thread_info_64.h | 78 +++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 37 deletions(-) diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index 6c9b214b8fc3..1e5c6f6152cd 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h @@ -29,9 +29,9 @@ struct thread_info { __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - mm_segment_t addr_limit; + int preempt_count; /* 0 => preemptable, + <0 => BUG */ + mm_segment_t addr_limit; struct restart_block restart_block; #ifdef CONFIG_IA32_EMULATION void __user *sysenter_return; @@ -61,17 +61,17 @@ struct thread_info { #define init_stack (init_thread_union.stack) static inline struct thread_info *current_thread_info(void) -{ +{ struct thread_info *ti; ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); - return ti; + return ti; } /* do not use in interrupt context */ static inline struct thread_info *stack_thread_info(void) { struct thread_info *ti; - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); + asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); return ti; } @@ -82,8 +82,8 @@ static inline struct thread_info *stack_thread_info(void) #define THREAD_FLAGS GFP_KERNEL #endif -#define alloc_thread_info(tsk) \ - ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) +#define alloc_thread_info(tsk) \ + ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) @@ -98,7 +98,8 @@ static inline struct thread_info *stack_thread_info(void) /* * thread information flags - * - these are process state flags that various assembly files may need to access + * - these are process state flags that various assembly files + * may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW * Warning: layout of LSW is hardcoded in entry.S @@ -114,7 +115,7 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ /* 16 free */ -#define TIF_IA32 17 /* 32bit process */ +#define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define 
TIF_ABI_PENDING 19 #define TIF_MEMDIE 20 @@ -126,39 +127,40 @@ static inline struct thread_info *stack_thread_info(void) #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ -#define _TIF_SYSCALL_TRACE (1<status & TS_POLLING) -- cgit v1.2.1 From 94cf8de0a0aff39c7b7785af4fc938ecacb79b7c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:45 -0700 Subject: include/asm-x86/tlbflush.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tlbflush.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 3998709ed637..0c0674d94255 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h @@ -32,7 +32,7 @@ static inline void __native_flush_tlb_global(void) static inline void __native_flush_tlb_single(unsigned long addr) { - __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); } static inline void __flush_tlb_all(void) @@ -134,8 +134,7 @@ void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, #define TLBSTATE_LAZY 2 #ifdef CONFIG_X86_32 -struct tlb_state -{ +struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; -- cgit v1.2.1 From 5d7d03b81af05f3c291b5c6be621a2b53d187e09 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:46 -0700 Subject: include/asm-x86/topology.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/topology.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index dada89e5b152..8d1a1f3d21b4 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -71,7 +71,7 @@ static inline int cpu_to_node(int cpu) #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (x86_cpu_to_node_map_early_ptr) { printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", - (int)cpu); + (int)cpu); dump_stack(); return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; } -- cgit v1.2.1 From 2d86e637d15984e363e8c3f14c8f0470b4a10a3d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:47 -0700 Subject: include/asm-x86/tsc.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/tsc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 7d3e27f7d484..d2d8eb5b55f5 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h @@ -42,7 +42,7 @@ static inline cycles_t vget_cycles(void) if (!cpu_has_tsc) return 0; #endif - return (cycles_t) __native_read_tsc(); + return (cycles_t)__native_read_tsc(); } extern void tsc_init(void); -- cgit v1.2.1 From b1fcec7f2296c4b9126e1b85b52494ac8910d528 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:48 -0700 Subject: include/asm-x86/uaccess_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/uaccess_32.h | 316 +++++++++++++++++++++++++------------------ 1 file changed, 187 insertions(+), 129 deletions(-) diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index fcc570ec4fee..8e7595c1f34e 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h @@ -32,7 +32,7 @@ #define get_fs() 
(current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* * movsl can be slow when source and dest are not both 8-byte aligned @@ -43,7 +43,9 @@ extern struct movsl_mask { } ____cacheline_aligned_in_smp movsl_mask; #endif -#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) +#define __addr_ok(addr) \ + ((unsigned long __force)(addr) < \ + (current_thread_info()->addr_limit.seg)) /* * Test whether a block of memory is a valid user space address. @@ -54,13 +56,16 @@ extern struct movsl_mask { * * This needs 33-bit arithmetic. We have a carry... */ -#define __range_ok(addr,size) ({ \ - unsigned long flag,roksum; \ - __chk_user_ptr(addr); \ - asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ - :"=&r" (flag), "=r" (roksum) \ - :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ - flag; }) +#define __range_ok(addr, size) \ +({ \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ + :"=&r" (flag), "=r" (roksum) \ + :"1" (addr), "g" ((int)(size)), \ + "rm" (current_thread_info()->addr_limit.seg)); \ + flag; \ +}) /** * access_ok: - Checks if a user space pointer is valid @@ -81,7 +86,7 @@ extern struct movsl_mask { * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ -#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) +#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) /* * The exception table consists of pairs of addresses: the first is the @@ -96,8 +101,7 @@ extern struct movsl_mask { * on our cache or tlb entries. */ -struct exception_table_entry -{ +struct exception_table_entry { unsigned long insn, fixup; }; @@ -122,13 +126,15 @@ extern void __get_user_1(void); extern void __get_user_2(void); extern void __get_user_4(void); -#define __get_user_x(size,ret,x,ptr) \ - __asm__ __volatile__("call __get_user_" #size \ - :"=a" (ret),"=d" (x) \ - :"0" (ptr)) +#define __get_user_x(size, ret, x, ptr) \ + asm volatile("call __get_user_" #size \ + :"=a" (ret),"=d" (x) \ + :"0" (ptr)) + +/* Careful: we have to cast the result to the type of the pointer + * for sign reasons */ -/* Careful: we have to cast the result to the type of the pointer for sign reasons */ /** * get_user: - Get a simple variable from user space. * @x: Variable to store result. @@ -146,15 +152,24 @@ extern void __get_user_4(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. 
*/ -#define get_user(x,ptr) \ -({ int __ret_gu; \ +#define get_user(x, ptr) \ +({ \ + int __ret_gu; \ unsigned long __val_gu; \ __chk_user_ptr(ptr); \ - switch(sizeof (*(ptr))) { \ - case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ - case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ - case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ - default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_x(1, __ret_gu, __val_gu, ptr); \ + break; \ + case 2: \ + __get_user_x(2, __ret_gu, __val_gu, ptr); \ + break; \ + case 4: \ + __get_user_x(4, __ret_gu, __val_gu, ptr); \ + break; \ + default: \ + __get_user_x(X, __ret_gu, __val_gu, ptr); \ + break; \ } \ (x) = (__typeof__(*(ptr)))__val_gu; \ __ret_gu; \ @@ -171,11 +186,25 @@ extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); -#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) +#define __put_user_1(x, ptr) \ + asm volatile("call __put_user_1" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_2(x, ptr) \ + asm volatile("call __put_user_2" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_4(x, ptr) \ + asm volatile("call __put_user_4" : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_8(x, ptr) \ + asm volatile("call __put_user_8" : "=a" (__ret_pu) \ + : "A" ((typeof(*(ptr)))(x)), "c" (ptr)) + +#define __put_user_X(x, ptr) \ + asm volatile("call __put_user_X" : "=a" (__ret_pu) \ + : "c" (ptr)) /** * put_user: - Write a simple value into user space. 
@@ -195,32 +224,43 @@ extern void __put_user_8(void); */ #ifdef CONFIG_X86_WP_WORKS_OK -#define put_user(x,ptr) \ -({ int __ret_pu; \ +#define put_user(x, ptr) \ +({ \ + int __ret_pu; \ __typeof__(*(ptr)) __pu_val; \ __chk_user_ptr(ptr); \ __pu_val = x; \ - switch(sizeof(*(ptr))) { \ - case 1: __put_user_1(__pu_val, ptr); break; \ - case 2: __put_user_2(__pu_val, ptr); break; \ - case 4: __put_user_4(__pu_val, ptr); break; \ - case 8: __put_user_8(__pu_val, ptr); break; \ - default:__put_user_X(__pu_val, ptr); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_1(__pu_val, ptr); \ + break; \ + case 2: \ + __put_user_2(__pu_val, ptr); \ + break; \ + case 4: \ + __put_user_4(__pu_val, ptr); \ + break; \ + case 8: \ + __put_user_8(__pu_val, ptr); \ + break; \ + default: \ + __put_user_X(__pu_val, ptr); \ + break; \ } \ __ret_pu; \ }) #else -#define put_user(x,ptr) \ +#define put_user(x, ptr) \ ({ \ - int __ret_pu; \ - __typeof__(*(ptr)) __pus_tmp = x; \ - __ret_pu=0; \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ - sizeof(*(ptr))) != 0)) \ - __ret_pu=-EFAULT; \ - __ret_pu; \ - }) + int __ret_pu; \ + __typeof__(*(ptr))__pus_tmp = x; \ + __ret_pu = 0; \ + if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ + sizeof(*(ptr))) != 0)) \ + __ret_pu = -EFAULT; \ + __ret_pu; \ +}) #endif @@ -245,8 +285,8 @@ extern void __put_user_8(void); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) /** @@ -268,54 +308,62 @@ extern void __put_user_8(void); * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ + __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ __pu_err; \ }) -#define __put_user_u64(x, addr, err) \ - __asm__ __volatile__( \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ - "3:\n" \ - ".section .fixup,\"ax\"\n" \ - "4: movl %3,%0\n" \ - " jmp 3b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,4b) \ - _ASM_EXTABLE(2b,4b) \ - : "=r"(err) \ - : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) +#define __put_user_u64(x, addr, err) \ + asm volatile("1: movl %%eax,0(%2)\n" \ + "2: movl %%edx,4(%2)\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: movl %3,%0\n" \ + " jmp 3b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=r" (err) \ + : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) #ifdef CONFIG_X86_WP_WORKS_OK -#define __put_user_size(x,ptr,size,retval,errret) \ +#define __put_user_size(x, ptr, size, retval, errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ - case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ - case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ - default: __put_user_bad(); \ + case 1: \ + __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "l", "", "ir", 
errret); \ + break; \ + case 8: \ + __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ + break; \ + default: \ + __put_user_bad(); \ } \ } while (0) #else -#define __put_user_size(x,ptr,size,retval,errret) \ +#define __put_user_size(x, ptr, size, retval, errret) \ do { \ - __typeof__(*(ptr)) __pus_tmp = x; \ + __typeof__(*(ptr))__pus_tmp = x; \ retval = 0; \ \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ + if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ retval = errret; \ } while (0) @@ -329,65 +377,70 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) - - -#define __get_user_nocheck(x,ptr,size) \ -({ \ - long __gu_err; \ - unsigned long __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ - (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ + asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) + + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval,errret) \ +#define __get_user_size(x, ptr, size, retval, errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ - case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ - case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ - default: (x) = __get_user_bad(); \ + case 1: \ + __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \ + break; \ + default: \ + (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %2,%"rtype"1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errret), "0"(err)) - - -unsigned long __must_check __copy_to_user_ll(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nozero(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, - const void __user *from, unsigned long n); + asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl %3,%0\n" \ + " xor"itype" %"rtype"1,%"rtype"1\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), 
"0" (err)) + + +unsigned long __must_check __copy_to_user_ll + (void __user *to, const void *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nozero + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nocache + (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_from_user_ll_nocache_nozero + (void *to, const void __user *from, unsigned long n); /** * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. @@ -416,13 +469,16 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) switch (n) { case 1: - __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); + __put_user_size(*(u8 *)from, (u8 __user *)to, + 1, ret, 1); return ret; case 2: - __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); + __put_user_size(*(u16 *)from, (u16 __user *)to, + 2, ret, 2); return ret; case 4: - __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); + __put_user_size(*(u32 *)from, (u32 __user *)to, + 4, ret, 4); return ret; } } @@ -545,19 +601,21 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, } static __always_inline unsigned long -__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) +__copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) { return __copy_from_user_ll_nocache_nozero(to, from, n); } unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); + const void *from, unsigned long n); unsigned long __must_check copy_from_user(void *to, - const void __user *from, unsigned long n); + const void __user *from, + unsigned long n); long __must_check strncpy_from_user(char *dst, const char __user *src, - long count); + long count); long __must_check __strncpy_from_user(char *dst, - const char __user *src, long count); + const char __user *src, long count); /** * strlen_user: - Get the size of a string in user space. -- cgit v1.2.1 From b896313e53344e79cc8bbc69f0a7d5c2b1735895 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:49 -0700 Subject: include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/uaccess_64.h | 376 ++++++++++++++++++++++++++----------------- 1 file changed, 227 insertions(+), 149 deletions(-) diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index b87eb4ba8f9d..b8a2f4339903 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h @@ -29,23 +29,27 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) -#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg))) +#define __addr_ok(addr) (!((unsigned long)(addr) & \ + (current_thread_info()->addr_limit.seg))) /* * Uhhuh, this needs 65-bit arithmetic. We have a carry.. 
*/ -#define __range_not_ok(addr,size) ({ \ - unsigned long flag,roksum; \ - __chk_user_ptr(addr); \ - asm("# range_ok\n\r" \ - "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ - :"=&r" (flag), "=r" (roksum) \ - :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \ - flag; }) +#define __range_not_ok(addr, size) \ +({ \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("# range_ok\n\r" \ + "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ + : "=&r" (flag), "=r" (roksum) \ + : "1" (addr), "g" ((long)(size)), \ + "g" (current_thread_info()->addr_limit.seg)); \ + flag; \ +}) -#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) +#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0) /* * The exception table consists of pairs of addresses: the first is the @@ -60,8 +64,7 @@ * on our cache or tlb entries. */ -struct exception_table_entry -{ +struct exception_table_entry { unsigned long insn, fixup; }; @@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs); * accesses to the same area of user memory). */ -#define __get_user_x(size,ret,x,ptr) \ - asm volatile("call __get_user_" #size \ - :"=a" (ret),"=d" (x) \ - :"c" (ptr) \ - :"r8") +#define __get_user_x(size, ret, x, ptr) \ + asm volatile("call __get_user_" #size \ + : "=a" (ret),"=d" (x) \ + : "c" (ptr) \ + : "r8") + +/* Careful: we have to cast the result to the type of the pointer + * for sign reasons */ -/* Careful: we have to cast the result to the type of the pointer for sign reasons */ -#define get_user(x,ptr) \ -({ unsigned long __val_gu; \ - int __ret_gu; \ +#define get_user(x, ptr) \ +({ \ + unsigned long __val_gu; \ + int __ret_gu; \ __chk_user_ptr(ptr); \ - switch(sizeof (*(ptr))) { \ - case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ - case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ - case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ - case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ - default: __get_user_bad(); break; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_x(1, __ret_gu, __val_gu, ptr); \ + break; \ + case 2: \ + __get_user_x(2, __ret_gu, __val_gu, ptr); \ + break; \ + case 4: \ + __get_user_x(4, __ret_gu, __val_gu, ptr); \ + break; \ + case 8: \ + __get_user_x(8, __ret_gu, __val_gu, ptr); \ + break; \ + default: \ + __get_user_bad(); \ + break; \ } \ (x) = (__force typeof(*(ptr)))__val_gu; \ __ret_gu; \ @@ -112,55 +128,73 @@ extern void __put_user_4(void); extern void __put_user_8(void); extern void __put_user_bad(void); -#define __put_user_x(size,ret,x,ptr) \ - asm volatile("call __put_user_" #size \ - :"=a" (ret) \ - :"c" (ptr),"d" (x) \ - :"r8") +#define __put_user_x(size, ret, x, ptr) \ + asm volatile("call __put_user_" #size \ + :"=a" (ret) \ + :"c" (ptr),"d" (x) \ + :"r8") -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ int __pu_err; \ - 
__put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - int __pu_err; \ - typeof(*(ptr)) __user *__pu_addr = (ptr); \ - switch (size) { \ - case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ - case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ - case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ - case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ - default: __put_user_bad(); \ - } \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + int __pu_err; \ + typeof(*(ptr)) __user *__pu_addr = (ptr); \ + switch (size) { \ + case 1: \ + __put_user_x(1, __pu_err, x, __pu_addr); \ + break; \ + case 2: \ + __put_user_x(2, __pu_err, x, __pu_addr); \ + break; \ + case 4: \ + __put_user_x(4, __pu_err, x, __pu_addr); \ + break; \ + case 8: \ + __put_user_x(8, __pu_err, x, __pu_addr); \ + break; \ + default: \ + __put_user_bad(); \ + } \ + __pu_err; \ }) -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\ - case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\ - case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\ - case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\ - default: __put_user_bad(); \ + case 1: \ + __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\ + break; \ + case 8: \ + __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \ + break; \ + default: \ + __put_user_bad(); \ } \ } while (0) @@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. 
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - asm volatile( \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) - - -#define __get_user_nocheck(x,ptr,size) \ + asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + "2:\n" \ + ".section .fixup, \"ax\"\n" \ + "3: mov %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ + : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err)) + + +#define __get_user_nocheck(x, ptr, size) \ ({ \ int __gu_err; \ unsigned long __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ (x) = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -201,31 +234,39 @@ extern int __get_user_4(void); extern int __get_user_8(void); extern int __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\ - case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\ - case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\ - case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\ - default: (x) = __get_user_bad(); \ + case 1: \ + __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\ + break; \ + case 8: \ + __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \ + break; \ + default: \ + (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ - asm volatile( \ - "1: mov"itype" %2,%"rtype"1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: mov %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b,3b) \ - : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errno), "0"(err)) + asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + "2:\n" \ + ".section .fixup, \"ax\"\n" \ + "3: mov %3,%0\n" \ + " xor"itype" %"rtype"1,%"rtype"1\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i"(errno), "0"(err)) /* * Copy To/From Userspace @@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len); static __always_inline __must_check int __copy_from_user(void *dst, const void __user *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic(dst,(__force void *)src,size); - switch (size) { - case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); + return copy_user_generic(dst, (__force void *)src, size); + switch (size) { + case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, + ret, "b", "b", "=q", 1); return ret; - case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2); + case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, + ret, "w", "w", "=r", 2); return ret; - case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4); + case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, + ret, "l", "k", "=r", 4); + return ret; + case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 8); return ret; - case 8:__get_user_asm(*(u64*)dst,(u64 __user 
*)src,ret,"q","","=r",8); - return ret; case 10: - __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); - if (unlikely(ret)) return ret; - __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2); - return ret; + __get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u16 *)(8 + (char *)dst), + (u16 __user *)(8 + (char __user *)src), + ret, "w", "w", "=r", 2); + return ret; case 16: - __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); - if (unlikely(ret)) return ret; - __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8); - return ret; + __get_user_asm(*(u64 *)dst, (u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u64 *)(8 + (char *)dst), + (u64 __user *)(8 + (char __user *)src), + ret, "q", "", "=r", 8); + return ret; default: - return copy_user_generic(dst,(__force void *)src,size); + return copy_user_generic(dst, (__force void *)src, size); } -} +} static __always_inline __must_check int __copy_to_user(void __user *dst, const void *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst,src,size); - switch (size) { - case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); + return copy_user_generic((__force void *)dst, src, size); + switch (size) { + case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); return ret; - case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2); + case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, + ret, "w", "w", "ir", 2); return ret; - case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4); + case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, + ret, "l", "k", "ir", 4); + return ret; + case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 8); return ret; - case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8); - return ret; case 10: - __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10); - if (unlikely(ret)) return ret; + __put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 10); + if (unlikely(ret)) + return ret; asm("":::"memory"); - __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2); - return ret; + __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; case 16: - __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16); - if (unlikely(ret)) return ret; + __put_user_asm(*(u64 *)src, (u64 __user *)dst, + ret, "q", "", "ir", 16); + if (unlikely(ret)) + return ret; asm("":::"memory"); - __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8); - return ret; + __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, + ret, "q", "", "ir", 8); + return ret; default: - return copy_user_generic((__force void *)dst,src,size); + return copy_user_generic((__force void *)dst, src, size); } -} +} static __always_inline __must_check int __copy_in_user(void __user *dst, const void __user *src, unsigned size) -{ +{ int ret = 0; if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst,(__force void *)src,size); - switch (size) { - case 1: { + return copy_user_generic((__force void *)dst, + (__force void *)src, size); + switch (size) { + case 1: { u8 tmp; - __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); + __get_user_asm(tmp, (u8 __user *)src, + ret, 
"b", "b", "=q", 1); if (likely(!ret)) - __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); + __put_user_asm(tmp, (u8 __user *)dst, + ret, "b", "b", "iq", 1); return ret; } - case 2: { + case 2: { u16 tmp; - __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); + __get_user_asm(tmp, (u16 __user *)src, + ret, "w", "w", "=r", 2); if (likely(!ret)) - __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); + __put_user_asm(tmp, (u16 __user *)dst, + ret, "w", "w", "ir", 2); return ret; } - case 4: { + case 4: { u32 tmp; - __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); + __get_user_asm(tmp, (u32 __user *)src, + ret, "l", "k", "=r", 4); if (likely(!ret)) - __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); + __put_user_asm(tmp, (u32 __user *)dst, + ret, "l", "k", "ir", 4); return ret; } - case 8: { + case 8: { u64 tmp; - __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); + __get_user_asm(tmp, (u64 __user *)src, + ret, "q", "", "=r", 8); if (likely(!ret)) - __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); + __put_user_asm(tmp, (u64 __user *)dst, + ret, "q", "", "ir", 8); return ret; } default: - return copy_user_generic((__force void *)dst,(__force void *)src,size); + return copy_user_generic((__force void *)dst, + (__force void *)src, size); } -} +} -__must_check long +__must_check long strncpy_from_user(char *dst, const char __user *src, long count); -__must_check long +__must_check long __strncpy_from_user(char *dst, const char __user *src, long count); __must_check long strnlen_user(const char __user *str, long n); __must_check long __strnlen_user(const char __user *str, long n); @@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str); __must_check unsigned long clear_user(void __user *mem, unsigned long len); __must_check unsigned long __clear_user(void __user *mem, unsigned long len); -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size); +__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, + unsigned size); static __must_check __always_inline int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) @@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) } #define ARCH_HAS_NOCACHE_UACCESS 1 -extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); +extern long __copy_user_nocache(void *dst, const void __user *src, + unsigned size, int zerorest); -static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) +static inline int __copy_from_user_nocache(void *dst, const void __user *src, + unsigned size) { might_sleep(); return __copy_user_nocache(dst, src, size, 1); } -static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) +static inline int __copy_from_user_inatomic_nocache(void *dst, + const void __user *src, + unsigned size) { return __copy_user_nocache(dst, src, size, 0); } -- cgit v1.2.1 From 6e714b37978bbd2c7a7eb89b4474b6c2133b7796 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:50 -0700 Subject: include/asm-x86/unaligned.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unaligned.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index 913598d4f761..d270ffe72759 100644 --- a/include/asm-x86/unaligned.h +++ 
b/include/asm-x86/unaligned.h @@ -32,6 +32,6 @@ * * Note that unaligned accesses can be very expensive on some architectures. */ -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) +#define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) #endif /* _ASM_X86_UNALIGNED_H */ -- cgit v1.2.1 From 687fc16b65e96d72a680291670584090207cadf8 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:51 -0700 Subject: include/asm-x86/unistd_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unistd_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 984123a68f7c..8317d94771d3 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h @@ -81,7 +81,7 @@ #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 -- cgit v1.2.1 From c489f4451965f4d355340ea1e60a5863c9ed2890 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:52 -0700 Subject: include/asm-x86/unistd_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/unistd_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 3883ceb54ef5..fe26e36d0f51 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h @@ -2,7 +2,7 @@ #define _ASM_X86_64_UNISTD_H_ #ifndef __SYSCALL -#define __SYSCALL(a,b) +#define __SYSCALL(a, b) #endif /* -- cgit v1.2.1 From 826700dc9b483d0d0de8ff4901043d54ed8b64f0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:53 -0700 Subject: include/asm-x86/user_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user_32.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 6157da6f882c..d6e51edc259d 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h @@ -100,10 +100,10 @@ struct user_regs_struct { struct user{ /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ + struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ struct user_i387_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ unsigned long int u_tsize; /* Text segment size (pages). */ @@ -118,7 +118,7 @@ struct user{ int reserved; /* No longer used */ unsigned long u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ - struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ + struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. 
*/ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; -- cgit v1.2.1 From a31216194c84cfa72515db4f9365ce2c68ed5791 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:54 -0700 Subject: include/asm-x86/user32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user32.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h index f769872debea..a3d910047879 100644 --- a/include/asm-x86/user32.h +++ b/include/asm-x86/user32.h @@ -1,7 +1,8 @@ #ifndef USER32_H #define USER32_H 1 -/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */ +/* IA32 compatible user structures for ptrace. + * These should be used for 32bit coredumps too. */ struct user_i387_ia32_struct { u32 cwd; @@ -42,9 +43,9 @@ struct user_regs_struct32 { }; struct user32 { - struct user_regs_struct32 regs; /* Where the registers are actually stored */ + struct user_regs_struct32 regs; /* Where the registers are actually stored */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ __u32 u_tsize; /* Text segment size (pages). */ -- cgit v1.2.1 From a206ea11b665cfb5360d05367eea1e9cfd3f3c8b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:55 -0700 Subject: include/asm-x86/user_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/user_64.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 963616455609..6037b634c77f 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h @@ -45,12 +45,13 @@ */ /* This matches the 64bit FXSAVE format as defined by AMD. It is the same - as the 32bit format defined by Intel, except that the selector:offset pairs for - data and eip are replaced with flat 64bit pointers. */ + as the 32bit format defined by Intel, except that the selector:offset pairs + for data and eip are replaced with flat 64bit pointers. */ struct user_i387_struct { unsigned short cwd; unsigned short swd; - unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ + unsigned short twd; /* Note this is not the same as + the 32bit/x87/FSAVE twd */ unsigned short fop; __u64 rip; __u64 rdp; @@ -97,13 +98,14 @@ struct user_regs_struct { /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments are within the file, and what virtual addresses to use. */ -struct user{ + +struct user { /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ + struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ + /* for this mess. Not yet used. */ int pad0; struct user_i387_struct i387; /* Math Co-processor registers. 
*/ /* The rest of this junk is to help gdb figure out what goes where */ @@ -120,7 +122,7 @@ struct user{ int pad1; unsigned long u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ - struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ + struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ unsigned long u_debugreg[8]; -- cgit v1.2.1 From ac1a7b0eaa1db04143ab6132c6ce4489afbb8a18 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:56 -0700 Subject: include/asm-x86/vdso.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vdso.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 9bb86899abf0..86e085e003d2 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h @@ -8,9 +8,11 @@ extern const char VDSO64_PRELINK[]; * Given a pointer to the vDSO image, find the pointer to VDSO64_name * as that symbol is defined in the vDSO sources or linker script. */ -#define VDSO64_SYMBOL(base, name) ({ \ - extern const char VDSO64_##name[]; \ - (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) +#define VDSO64_SYMBOL(base, name) \ +({ \ + extern const char VDSO64_##name[]; \ + (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \ +}) #endif #if defined CONFIG_X86_32 || defined CONFIG_COMPAT @@ -20,9 +22,11 @@ extern const char VDSO32_PRELINK[]; * Given a pointer to the vDSO image, find the pointer to VDSO32_name * as that symbol is defined in the vDSO sources or linker script. */ -#define VDSO32_SYMBOL(base, name) ({ \ - extern const char VDSO32_##name[]; \ - (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) +#define VDSO32_SYMBOL(base, name) \ +({ \ + extern const char VDSO32_##name[]; \ + (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ +}) #endif /* -- cgit v1.2.1 From 364fe5ef4725176324ec17f8dc3fd488d615b0de Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:57 -0700 Subject: include/asm-x86/vga.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vga.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h index 0ecf68ac03aa..0ccf804377e6 100644 --- a/include/asm-x86/vga.h +++ b/include/asm-x86/vga.h @@ -12,9 +12,9 @@ * access the videoram directly without any black magic. 
*/ -#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) +#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) #define vga_readb(x) (*(x)) -#define vga_writeb(x,y) (*(y) = (x)) +#define vga_writeb(x, y) (*(y) = (x)) #endif -- cgit v1.2.1 From 9e8a935bcff6c8154eace12277e6a9d853ef790b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:58 -0700 Subject: include/asm-x86/vm86.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vm86.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index c92fe4af52e8..a2be241ed036 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -42,9 +42,11 @@ #define VM86_ARG(retval) ((retval) >> 8) #define VM86_SIGNAL 0 /* return due to signal */ -#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ +#define VM86_UNKNOWN 1 /* unhandled GP fault + - IO-instruction or similar */ #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ -#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ +#define VM86_STI 3 /* sti/popf/iret instruction enabled + virtual interrupts */ /* * Additional return values when invoking new vm86() @@ -205,7 +207,8 @@ void release_vm86_irqs(struct task_struct *); #define handle_vm86_fault(a, b) #define release_vm86_irqs(a) -static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { +static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) +{ return 0; } -- cgit v1.2.1 From 8948584eb282c4dc5c54f6f6ebbaf447a665c653 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:03:59 -0700 Subject: include/asm-x86/vmi.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/vmi.h | 88 +++++++++++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h index eb8bd892c01e..b7c0dea119fe 100644 --- a/include/asm-x86/vmi.h +++ b/include/asm-x86/vmi.h @@ -155,9 +155,9 @@ #ifndef __ASSEMBLY__ struct vmi_relocation_info { - unsigned char *eip; - unsigned char type; - unsigned char reserved[3]; + unsigned char *eip; + unsigned char type; + unsigned char reserved[3]; }; #endif @@ -173,53 +173,53 @@ struct vmi_relocation_info { #ifndef __ASSEMBLY__ struct vrom_header { - u16 rom_signature; // option ROM signature - u8 rom_length; // ROM length in 512 byte chunks - u8 rom_entry[4]; // 16-bit code entry point - u8 rom_pad0; // 4-byte align pad - u32 vrom_signature; // VROM identification signature - u8 api_version_min;// Minor version of API - u8 api_version_maj;// Major version of API - u8 jump_slots; // Number of jump slots - u8 reserved1; // Reserved for expansion - u32 virtual_top; // Hypervisor virtual address start - u16 reserved2; // Reserved for expansion - u16 license_offs; // Offset to License string - u16 pci_header_offs;// Offset to PCI OPROM header - u16 pnp_header_offs;// Offset to PnP OPROM header - u32 rom_pad3; // PnP reserverd / VMI reserved - u8 reserved[96]; // Reserved for headers - char vmi_init[8]; // VMI_Init jump point - char get_reloc[8]; // VMI_GetRelocationInfo jump point + u16 rom_signature; /* option ROM signature */ + u8 rom_length; /* ROM length in 512 byte chunks */ + u8 rom_entry[4]; /* 16-bit code entry point */ + u8 rom_pad0; /* 4-byte align pad */ + u32 vrom_signature; /* VROM identification signature */ + u8 
api_version_min;/* Minor version of API */ + u8 api_version_maj;/* Major version of API */ + u8 jump_slots; /* Number of jump slots */ + u8 reserved1; /* Reserved for expansion */ + u32 virtual_top; /* Hypervisor virtual address start */ + u16 reserved2; /* Reserved for expansion */ + u16 license_offs; /* Offset to License string */ + u16 pci_header_offs;/* Offset to PCI OPROM header */ + u16 pnp_header_offs;/* Offset to PnP OPROM header */ + u32 rom_pad3; /* PnP reserverd / VMI reserved */ + u8 reserved[96]; /* Reserved for headers */ + char vmi_init[8]; /* VMI_Init jump point */ + char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ } __attribute__((packed)); struct pnp_header { - char sig[4]; - char rev; - char size; - short next; - short res; - long devID; - unsigned short manufacturer_offset; - unsigned short product_offset; + char sig[4]; + char rev; + char size; + short next; + short res; + long devID; + unsigned short manufacturer_offset; + unsigned short product_offset; } __attribute__((packed)); struct pci_header { - char sig[4]; - short vendorID; - short deviceID; - short vpdData; - short size; - char rev; - char class; - char subclass; - char interface; - short chunks; - char rom_version_min; - char rom_version_maj; - char codetype; - char lastRom; - short reserved; + char sig[4]; + short vendorID; + short deviceID; + short vpdData; + short size; + char rev; + char class; + char subclass; + char interface; + short chunks; + char rom_version_min; + char rom_version_maj; + char codetype; + char lastRom; + short reserved; } __attribute__((packed)); /* Function prototypes for bootstrapping */ -- cgit v1.2.1 From d6ae390a0be73e6b777c6171e6b6f616462f555d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:00 -0700 Subject: include/asm-x86/voyager.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/voyager.h | 51 ++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h index 91a9932937ab..9c811d2e6f91 100644 --- a/include/asm-x86/voyager.h +++ b/include/asm-x86/voyager.h @@ -91,8 +91,7 @@ #define VOYAGER_WRITE_CONFIG 0x2 #define VOYAGER_BYPASS 0xff -typedef struct voyager_asic -{ +typedef struct voyager_asic { __u8 asic_addr; /* ASIC address; Level 4 */ __u8 asic_type; /* ASIC type */ __u8 asic_id; /* ASIC id */ @@ -113,7 +112,7 @@ typedef struct voyager_module { __u16 largest_reg; /* Largest register in the scan path */ __u16 smallest_reg; /* Smallest register in the scan path */ voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ - struct voyager_module *submodule; /* Submodule pointer */ + struct voyager_module *submodule; /* Submodule pointer */ struct voyager_module *next; /* Next module in linked list */ } voyager_module_t; @@ -135,7 +134,7 @@ typedef struct voyager_eeprom_hdr { __u16 cct_offset; __u16 log_length; /* length of err log */ __u16 xsum_end; /* offset to end of - checksum */ + checksum */ __u8 reserved[4]; __u8 sflag; /* starting sentinal */ __u8 part_number[13]; /* prom part number */ @@ -148,7 +147,8 @@ typedef struct voyager_eeprom_hdr { -#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) +#define VOYAGER_EPROM_SIZE_OFFSET \ + ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) #define VOYAGER_XSUM_END_OFFSET 0x2a /* the following three definitions are for internal table layouts @@ -199,7 +199,7 @@ typedef struct 
voyager_asic_data_table { #define VOYAGER_WCBIC_TOM_L 0x4 #define VOYAGER_WCBIC_TOM_H 0x5 -/* register defines for Voyager Memory Contol (VMC) +/* register defines for Voyager Memory Contol (VMC) * these are present on L4 machines only */ #define VOYAGER_VMC1 0x81 #define VOYAGER_VMC2 0x91 @@ -334,7 +334,7 @@ typedef struct { struct QuadDescription { __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields - * will be zero except for slot */ + * will be zero except for slot */ __u8 StructureVersion; __u32 CPI_BaseAddress; __u32 LARC_BankSize; @@ -342,7 +342,7 @@ struct QuadDescription { __u8 Slot; /* Processor slots 1 - 4 */ } __attribute__((packed)); -struct ProcBoardInfo { +struct ProcBoardInfo { __u8 Type; __u8 StructureVersion; __u8 NumberOfBoards; @@ -382,19 +382,30 @@ struct CPU_Info { * packed in it by our friend the compiler. */ typedef struct { - __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ - __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ - __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ - __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ - __u32 OS_Flags; /* Flags set by the OS as info for SUS */ - __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ - __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ + __u8 Mailbox_SUS; /* Written to by SUS to give + commands/response to the OS */ + __u8 Mailbox_OS; /* Written to by the OS to give + commands/response to SUS */ + __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the + interface SUS supports */ + __u8 OS_MailboxVersion; /* Tells SUS which iteration of the + interface the OS supports */ + __u32 OS_Flags; /* Flags set by the OS as info for + SUS */ + __u32 SUS_Flags; /* Flags set by SUS as info + for the OS */ + __u32 WatchDogPeriod; /* Watchdog period (in seconds) which + the DP uses to see if the OS + is dead */ __u32 WatchDogCount; /* Updated by the OS on every tic. 
*/ - __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ - MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ + __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS + where to stuff the SUS error log + on a dump */ + MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; + /* Storage for MCA POS data */ /* All new SECOND_PASS_INTERFACE fields added from this point */ - struct ProcBoardInfo *BoardData; - struct CPU_Info *CPU_Data; + struct ProcBoardInfo *BoardData; + struct CPU_Info *CPU_Data; /* All new fields must be added from this point */ } Voyager_KernelSUS_Mbox_t; @@ -478,7 +489,7 @@ struct voyager_SUS { __u32 SUS_errorlog; /* lots of system configuration stuff under here */ }; - + /* Variables exported by voyager_smp */ extern __u32 voyager_extended_vic_processors; extern __u32 voyager_allowed_boot_processors; -- cgit v1.2.1 From 8fdf765527920e30d8fd57da3a83d6bf56799114 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:02 -0700 Subject: include/asm-x86/xor_32.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/xor_32.h | 494 ++++++++++++++++++++++++----------------------- 1 file changed, 248 insertions(+), 246 deletions(-) diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index a41ef1bdd424..067b5c1835a3 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h @@ -16,12 +16,12 @@ * Copyright (C) 1998 Ingo Molnar. */ -#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" -#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" -#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" -#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" -#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" -#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" +#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n" +#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n" +#define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" +#define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" +#define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" +#define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" #include @@ -32,24 +32,24 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - ST(i,0) \ - XO1(i+1,1) \ - ST(i+1,1) \ - XO1(i+2,2) \ - ST(i+2,2) \ - XO1(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + ST(i, 0) \ + XO1(i+1, 1) \ + ST(i+1, 1) \ + XO1(i + 2, 2) \ + ST(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) @@ -76,25 +76,25 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - ST(i,0) \ - XO2(i+1,1) \ - ST(i+1,1) \ - XO2(i+2,2) \ - ST(i+2,2) \ - XO2(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + ST(i, 0) \ + XO2(i + 1, 1) \ + ST(i + 1, 1) \ + XO2(i + 2, 2) \ + ST(i + 2, 2) \ + XO2(i + 3, 3) \ + ST(i 
+ 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -125,29 +125,29 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - ST(i,0) \ - XO3(i+1,1) \ - ST(i+1,1) \ - XO3(i+2,2) \ - ST(i+2,2) \ - XO3(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + ST(i, 0) \ + XO3(i + 1, 1) \ + ST(i + 1, 1) \ + XO3(i + 2, 2) \ + ST(i + 2, 2) \ + XO3(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -186,35 +186,35 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - ST(i,0) \ - XO4(i+1,1) \ - ST(i+1,1) \ - XO4(i+2,2) \ - ST(i+2,2) \ - XO4(i+3,3) \ - ST(i+3,3) +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + XO4(i, 0) \ + ST(i, 0) \ + XO4(i + 1, 1) \ + ST(i + 1, 1) \ + XO4(i + 2, 2) \ + ST(i + 2, 2) \ + XO4(i + 3, 3) \ + ST(i + 3, 3) " .align 32 ;\n" " 1: ;\n" @@ -233,13 +233,13 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) - : "r" (p4), "r" (p5) + : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. 
*/ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } @@ -259,7 +259,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -286,7 +286,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) " pxor 56(%2), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " decl %0 ;\n" @@ -307,7 +307,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -342,7 +342,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%3), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -364,7 +364,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, kernel_fpu_begin(); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -407,7 +407,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%4), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -436,9 +436,9 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" @@ -489,7 +489,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, " pxor 56(%5), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" - + " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" @@ -505,7 +505,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. 
*/ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } @@ -531,11 +531,12 @@ static struct xor_block_template xor_block_p5_mmx = { * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ -#define XMMS_SAVE do { \ +#define XMMS_SAVE \ +do { \ preempt_disable(); \ cr0 = read_cr0(); \ clts(); \ - __asm__ __volatile__ ( \ + asm volatile( \ "movups %%xmm0,(%0) ;\n\t" \ "movups %%xmm1,0x10(%0) ;\n\t" \ "movups %%xmm2,0x20(%0) ;\n\t" \ @@ -543,10 +544,11 @@ static struct xor_block_template xor_block_p5_mmx = { : \ : "r" (xmm_save) \ : "memory"); \ -} while(0) +} while (0) -#define XMMS_RESTORE do { \ - __asm__ __volatile__ ( \ +#define XMMS_RESTORE \ +do { \ + asm volatile( \ "sfence ;\n\t" \ "movups (%0),%%xmm0 ;\n\t" \ "movups 0x10(%0),%%xmm1 ;\n\t" \ @@ -557,76 +559,76 @@ static struct xor_block_template xor_block_p5_mmx = { : "memory"); \ write_cr0(cr0); \ preempt_enable(); \ -} while(0) +} while (0) #define ALIGN16 __attribute__((aligned(16))) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" -#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" -#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" +#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" +#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" -#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" -#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" -#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" -#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" -#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" +#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" +#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" +#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" +#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" +#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ +#define BLOCK(i) \ + LD(i, 0) \ + LD(i + 1, 1) \ PF1(i) \ - PF1(i+2) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF1(i + 2) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : - : "memory"); + : "memory"); XMMS_RESTORE; } @@ -635,59 +637,59 @@ static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK 
#define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF2(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r"(p2), "+r"(p3) : - : "memory" ); + : "memory" ); XMMS_RESTORE; } @@ -696,66 +698,66 @@ static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ + PF2(i + 2) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF3(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " addl $256, %4 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : - : "memory" ); + : "memory" ); XMMS_RESTORE; } @@ -764,7 +766,7 @@ static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { - unsigned long lines = bytes >> 8; + unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; @@ -776,65 +778,65 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ - __asm__ ("" : "+r" (p4), "+r" (p5)); + asm("" : "+r" (p4), "+r" (p5)); - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ + PF1(i + 2) \ LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ + PF2(i + 2) \ XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ + PF3(i + 2) \ XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ PF4(i) \ - PF4(i+2) \ - PF0(i+4) \ - PF0(i+6) \ + PF4(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ XO4(i,0) \ - XO4(i+1,1) \ - XO4(i+2,2) \ - XO4(i+3,3) \ + XO4(i + 1, 1) \ + XO4(i + 2, 2) \ + XO4(i + 3, 3) \ ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " addl $256, %5 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" + " addl $256, %1 ;\n" + " addl $256, %2 ;\n" + " addl $256, %3 ;\n" + " addl $256, %4 ;\n" + " addl $256, %5 ;\n" + " decl %0 ;\n" + " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) @@ -843,17 +845,17 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ - __asm__ ("" : "=r" (p4), "=r" (p5)); + asm("" : "=r" (p4), "=r" (p5)); XMMS_RESTORE; } static struct xor_block_template xor_block_pIII_sse = { - .name = "pIII_sse", - .do_2 = xor_sse_2, - .do_3 = xor_sse_3, - .do_4 = xor_sse_4, - .do_5 = xor_sse_5, + .name = "pIII_sse", + .do_2 = xor_sse_2, + .do_3 = xor_sse_3, + .do_4 = xor_sse_4, + .do_5 = xor_sse_5, }; /* Also try the generic routines. */ @@ -861,21 +863,21 @@ static struct xor_block_template xor_block_pIII_sse = { #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_8regs); \ - xor_speed(&xor_block_8regs_p); \ - xor_speed(&xor_block_32regs); \ - xor_speed(&xor_block_32regs_p); \ - if (cpu_has_xmm) \ - xor_speed(&xor_block_pIII_sse); \ - if (cpu_has_mmx) { \ - xor_speed(&xor_block_pII_mmx); \ - xor_speed(&xor_block_p5_mmx); \ - } \ - } while (0) +do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + if (cpu_has_xmm) \ + xor_speed(&xor_block_pIII_sse); \ + if (cpu_has_mmx) { \ + xor_speed(&xor_block_pII_mmx); \ + xor_speed(&xor_block_p5_mmx); \ + } \ +} while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ +#define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? 
&xor_block_pIII_sse : FASTEST) -- cgit v1.2.1 From 687c80540975709844699d9a96070e2cf3bad39d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 23 Mar 2008 01:04:03 -0700 Subject: include/asm-x86/xor_64.h: checkpatch cleanups - formatting only Signed-off-by: Joe Perches Signed-off-by: Ingo Molnar --- include/asm-x86/xor_64.h | 294 ++++++++++++++++++++++++----------------------- 1 file changed, 149 insertions(+), 145 deletions(-) diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index 1eee7fcb2420..24957e39ac8a 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h @@ -24,20 +24,23 @@ */ /* - * x86-64 changes / gcc fixes from Andi Kleen. + * x86-64 changes / gcc fixes from Andi Kleen. * Copyright 2002 Andi Kleen, SuSE Labs. * * This hasn't been optimized for the hammer yet, but there are likely * no advantages to be gotten from x86-64 here anyways. */ -typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; +typedef struct { + unsigned long a, b; +} __attribute__((aligned(16))) xmm_store_t; -/* Doesn't use gcc to save the XMM registers, because there is no easy way to +/* Doesn't use gcc to save the XMM registers, because there is no easy way to tell it to do a clts before the register saving. */ -#define XMMS_SAVE do { \ +#define XMMS_SAVE \ +do { \ preempt_disable(); \ - asm volatile ( \ + asm volatile( \ "movq %%cr0,%0 ;\n\t" \ "clts ;\n\t" \ "movups %%xmm0,(%1) ;\n\t" \ @@ -47,10 +50,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; : "=&r" (cr0) \ : "r" (xmm_save) \ : "memory"); \ -} while(0) +} while (0) -#define XMMS_RESTORE do { \ - asm volatile ( \ +#define XMMS_RESTORE \ +do { \ + asm volatile( \ "sfence ;\n\t" \ "movups (%1),%%xmm0 ;\n\t" \ "movups 0x10(%1),%%xmm1 ;\n\t" \ @@ -61,72 +65,72 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; : "r" (cr0), "r" (xmm_save) \ : "memory"); \ preempt_enable(); \ -} while(0) +} while (0) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" -#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" -#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" +#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" +#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" -#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" -#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" -#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" -#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" -#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" +#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" +#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" +#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" +#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" +#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { - unsigned int lines = bytes >> 8; + unsigned int lines = bytes >> 8; unsigned long cr0; xmm_store_t xmm_save[4]; XMMS_SAVE; - asm volatile ( + asm volatile( #undef BLOCK #define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ + LD(i, 0) \ + LD(i + 1, 1) \ PF1(i) \ - 
PF1(i+2) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF1(i + 2) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" " decl %[cnt] ; jnz 1b" : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) - : [inc] "r" (256UL) - : "memory"); + : [inc] "r" (256UL) + : "memory"); XMMS_RESTORE; } @@ -141,52 +145,52 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF2(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) : [inc] "r" (256UL) - : "memory"); + : "memory"); XMMS_RESTORE; } @@ -195,64 +199,64 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned int lines = bytes >> 8; - xmm_store_t xmm_save[4]; + xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + PF2(i + 2) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF3(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" - " addq %[inc], %[p4] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p4] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) : [inc] "r" (256UL) - : "memory" ); + : "memory" ); 
XMMS_RESTORE; } @@ -261,70 +265,70 @@ static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { - unsigned int lines = bytes >> 8; + unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; - __asm__ __volatile__ ( + asm volatile( #undef BLOCK #define BLOCK(i) \ PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ + PF1(i + 2) \ + LD(i, 0) \ + LD(i + 1, 1) \ + LD(i + 2, 2) \ + LD(i + 3, 3) \ PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ + PF2(i + 2) \ + XO1(i, 0) \ + XO1(i + 1, 1) \ + XO1(i + 2, 2) \ + XO1(i + 3, 3) \ PF3(i) \ - PF3(i+2) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ + PF3(i + 2) \ + XO2(i, 0) \ + XO2(i + 1, 1) \ + XO2(i + 2, 2) \ + XO2(i + 3, 3) \ PF4(i) \ - PF4(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - XO4(i+1,1) \ - XO4(i+2,2) \ - XO4(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ + PF4(i + 2) \ + PF0(i + 4) \ + PF0(i + 6) \ + XO3(i, 0) \ + XO3(i + 1, 1) \ + XO3(i + 2, 2) \ + XO3(i + 3, 3) \ + XO4(i, 0) \ + XO4(i + 1, 1) \ + XO4(i + 2, 2) \ + XO4(i + 3, 3) \ + ST(i, 0) \ + ST(i + 1, 1) \ + ST(i + 2, 2) \ + ST(i + 3, 3) \ PF0(0) PF0(2) " .align 32 ;\n" - " 1: ;\n" + " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) - " addq %[inc], %[p1] ;\n" - " addq %[inc], %[p2] ;\n" - " addq %[inc], %[p3] ;\n" - " addq %[inc], %[p4] ;\n" - " addq %[inc], %[p5] ;\n" + " addq %[inc], %[p1] ;\n" + " addq %[inc], %[p2] ;\n" + " addq %[inc], %[p3] ;\n" + " addq %[inc], %[p4] ;\n" + " addq %[inc], %[p5] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), - [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), + [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) : [inc] "r" (256UL) : "memory"); @@ -333,18 +337,18 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static struct xor_block_template xor_block_sse = { - .name = "generic_sse", - .do_2 = xor_sse_2, - .do_3 = xor_sse_3, - .do_4 = xor_sse_4, - .do_5 = xor_sse_5, + .name = "generic_sse", + .do_2 = xor_sse_2, + .do_3 = xor_sse_3, + .do_4 = xor_sse_4, + .do_5 = xor_sse_5, }; #undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_sse); \ - } while (0) +#define XOR_TRY_TEMPLATES \ +do { \ + xor_speed(&xor_block_sse); \ +} while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu -- cgit v1.2.1 From d93c870bad38e8daaaf9f7e900a13431f24becbb Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 24 Mar 2008 16:43:21 -0700 Subject: x86: only enable interrupts when kernel state has been set up The sysenter path tries to enable interrupts immediately. Unfortunately this doesn't work in a paravirt environment, because not enough kernel state has been set up at that point (namely, pointing %fs to the kernel percpu data segment). To fix this, defer ENABLE_INTERRUPTS until after the kernel state has been set up. Unfortunately this means that we're running with interrupts disabled for a while without calling the IRQ tracing code, but that can't be called without setting up %fs either. 
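To make the new ordering concrete, here is a rough C-level sketch of the sequence this patch establishes (a sketch only, with illustrative helper names; the real path is the entry_32.S assembly below):

	/*
	 * Sketch of the reordered sysenter path: interrupts stay
	 * disabled until enough kernel state exists to enable them.
	 */
	void sysenter_entry_sketch(void)
	{
		build_user_frame();	/* push ss/esp/eflags (with IF set)/cs/eip */
		save_registers();	/* SAVE_ALL; %fs now points at per-cpu data */
		local_irq_enable();	/* only now is ENABLE_INTERRUPTS safe */
		handle_syscall();
	}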
Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 4b87c32b639f..a664d5726d8d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -291,10 +291,10 @@ ENTRY(ia32_sysenter_target) movl TSS_sysenter_sp0(%esp),%esp sysenter_past_esp: /* - * No need to follow this irqs on/off section: the syscall - * disabled irqs and here we enable it straight after entry: + * Interrupts are disabled here, but we can't trace it until + * enough kernel state to call TRACE_IRQS_OFF can be called - but + * we immediately enable interrupts at that point anyway. */ - ENABLE_INTERRUPTS(CLBR_NONE) pushl $(__USER_DS) CFI_ADJUST_CFA_OFFSET 4 /*CFI_REL_OFFSET ss, 0*/ @@ -302,6 +302,7 @@ sysenter_past_esp: CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET esp, 0 pushfl + orl $X86_EFLAGS_IF, (%esp) CFI_ADJUST_CFA_OFFSET 4 pushl $(__USER_CS) CFI_ADJUST_CFA_OFFSET 4 @@ -315,6 +316,11 @@ sysenter_past_esp: CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + SAVE_ALL + ENABLE_INTERRUPTS(CLBR_NONE) + /* * Load the potential sixth argument from user stack. * Careful about security. @@ -322,14 +328,12 @@ sysenter_past_esp: cmpl $__PAGE_OFFSET-3,%ebp jae syscall_fault 1: movl (%ebp),%ebp + movl %ebp,PT_EBP(%esp) .section __ex_table,"a" .align 4 .long 1b,syscall_fault .previous - pushl %eax - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL GET_THREAD_INFO(%ebp) /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ @@ -543,9 +547,6 @@ END(syscall_exit_work) RING0_INT_FRAME # can't unwind into user space anyway syscall_fault: - pushl %eax # save orig_eax - CFI_ADJUST_CFA_OFFSET 4 - SAVE_ALL GET_THREAD_INFO(%ebp) movl $-EFAULT,PT_EAX(%esp) jmp resume_userspace -- cgit v1.2.1 From 7fda20f146d5d217684ffbc37c6b6c5f82c2dffd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Feb 2008 10:29:38 +0100 Subject: x86: spinlock ops are always-inlined Signed-off-by: Ingo Molnar --- include/asm-x86/spinlock.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 47dfe2607bb1..bc6376f1bc5a 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@ -78,7 +78,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) { short inc = 0x0100; @@ -99,7 +99,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) { int tmp; short new; @@ -120,7 +120,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return tmp; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -142,7 +142,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -165,7 +165,7 
@@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 int tmp;
 int new;
@@ -187,7 +187,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 return tmp;
 }
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 : "+m" (lock->slock)
-- cgit v1.2.1

From 43cdf5d6e0a75c1069adc8d126b97b792ff53142 Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Sat, 22 Mar 2008 18:50:22 +0100
Subject: x86: pgtable, document pde bits

Some of the pde bits weren't documented; add a short description to each of them.

Signed-off-by: Jiri Slaby
Cc: H. Peter Anvin
Signed-off-by: Ingo Molnar
---
 include/asm-x86/pgtable.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 2ce765070464..f1d9f4a03f6f 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -4,13 +4,13 @@
 #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS 0
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
 #define _PAGE_BIT_FILE 6
 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
-- cgit v1.2.1

From e5699a8231593d0e11e65ccf248549935304dab1 Mon Sep 17 00:00:00 2001
From: Ravikiran G Thirumalai
Date: Mon, 24 Mar 2008 14:48:36 -0700
Subject: x86: clean up vSMP detection

vSMP detection: access PCI config space early in boot to detect whether the system is a vSMPowered box, and cache the result in a flag, so that is_vsmp_box() always returns the cached value.
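The change boils down to a detect-once, cache-forever pattern; a minimal sketch, with a hypothetical probe_hardware() standing in for the early PCI config-space read:

	static int is_vsmp = -1;	/* -1: not probed yet */

	static void __init detect_once(void)
	{
		/* runs exactly once, early in boot */
		is_vsmp = probe_hardware() ? 1 : 0;
	}

	int is_vsmp_box(void)
	{
		/* later callers only ever read the cached flag */
		return is_vsmp == 1;
	}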
Signed-off-by: Ravikiran Thirumalai Signed-off-by: Ingo Molnar --- arch/x86/kernel/vsmp_64.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 1e9a791dbe39..caf2a26f5cfd 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -108,25 +108,34 @@ static void __init set_vsmp_pv_ops(void) #endif #ifdef CONFIG_PCI -static int vsmp = -1; +static int is_vsmp = -1; -int is_vsmp_box(void) +static void __init detect_vsmp_box(void) { - if (vsmp != -1) - return vsmp; + is_vsmp = 0; - vsmp = 0; if (!early_pci_allowed()) - return vsmp; + return; - /* Check if we are running on a ScaleMP vSMP box */ + /* Check if we are running on a ScaleMP vSMPowered box */ if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) == (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16))) - vsmp = 1; + is_vsmp = 1; +} - return vsmp; +int is_vsmp_box(void) +{ + if (is_vsmp != -1) + return is_vsmp; + else { + WARN_ON_ONCE(1); + return 0; + } } #else +static int __init detect_vsmp_box(void) +{ +} int is_vsmp_box(void) { return 0; @@ -135,6 +144,7 @@ int is_vsmp_box(void) void __init vsmp_init(void) { + detect_vsmp_box(); if (!is_vsmp_box()) return; -- cgit v1.2.1 From 15a601eb9cdc2a9cc69d5fc745317805a85c064c Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 12 Mar 2008 11:54:16 -0400 Subject: x86: fix test_poke for vmalloced pages * Ingo Molnar (mingo@elte.hu) wrote: > > * Mathieu Desnoyers wrote: > > > The shadow vmap for DEBUG_RODATA kernel text modification uses > > virt_to_page to get the pages from the pointer address. > > > > However, I think vmalloc_to_page would be required in case the page is > > used for modules. > > > > Since only the core kernel text is marked read-only, use > > kernel_text_address() to make sure we only shadow map the core kernel > > text, not modules. > > actually, i think we should mark module text readonly too. > Yes, but in the meantime, the x86 tree would need this patch to make kprobes work correctly on modules. I suspect that without this fix, with the enhanced hotplug and kprobes patch, kprobes will use text_poke to insert breakpoints in modules (vmalloced pages used), which will map the wrong pages and corrupt random kernel locations instead of updating the correct page. Work that would write protect the module pages should clearly be done, but it can come in a later time. We have to make sure we interact correctly with the page allocation debugging, as an example. Here is the patch against x86.git 2.6.25-rc5 : The shadow vmap for DEBUG_RODATA kernel text modification uses virt_to_page to get the pages from the pointer address. However, I think vmalloc_to_page would be required in case the page is used for modules. Since only the core kernel text is marked read-only, use kernel_text_address() to make sure we only shadow map the core kernel text, not modules. 
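The rule the patch relies on: virt_to_page() is only valid for addresses in the kernel's direct mapping, while module text lives in vmalloc space. A condensed sketch of the resulting dispatch, with write_via_shadow_map() as an invented stand-in for the vmap()-based shadow mapping:

	void poke_text_sketch(void *addr, const void *opcode, size_t len)
	{
		if (kernel_text_address((unsigned long)addr)) {
			/* core kernel text: marked read-only, so write
			 * through a temporary writable alias from vmap() */
			write_via_shadow_map(addr, opcode, len);
		} else {
			/* module text: vmalloc'ed and still writable */
			memcpy(addr, opcode, len);
		}
	}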
Signed-off-by: Mathieu Desnoyers CC: akpm@linux-foundation.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 0c92ad4d257a..df4099dc1c68 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -515,7 +515,7 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) BUG_ON(len > sizeof(long)); BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1)) - ((long)addr & ~(sizeof(long) - 1))); - { + if (kernel_text_address((unsigned long)addr)) { struct page *pages[2] = { virt_to_page(addr), virt_to_page(addr + PAGE_SIZE) }; if (!pages[1]) @@ -526,6 +526,13 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); local_irq_restore(flags); vunmap(vaddr); + } else { + /* + * modules are in vmalloc'ed memory, always writable. + */ + local_irq_save(flags); + memcpy(addr, opcode, len); + local_irq_restore(flags); } sync_core(); /* Could also do a CLFLUSH here to speed up CPU recovery; but -- cgit v1.2.1 From 4039ae538030d1c5fc70a9c4e168a758d35b8159 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Sun, 23 Mar 2008 00:00:06 +0300 Subject: x86: relocate_kernel_32.S - clear register in more elegant way Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/relocate_kernel_32.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index ec4e6a0e0c2e..fbc4fad23137 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -185,8 +185,7 @@ identity_mapped: /* Set cr4 to a known state: * Setting everything to zero seems safe. 
*/ - movl %cr4, %eax - andl $0, %eax + xorl %eax, %eax movl %eax, %cr4 jmp 1f -- cgit v1.2.1 From a7bba17bf09e1c5bdbdd6c0ab0c7833baedf4653 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Sun, 23 Mar 2008 00:00:07 +0300 Subject: x86: relocate_kernel - use PAGE_SIZE instead of numeric constant Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/relocate_kernel_32.S | 2 +- arch/x86/kernel/relocate_kernel_64.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index fbc4fad23137..ce12bb8678ad 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -154,7 +154,7 @@ relocate_new_kernel: movl %eax, %cr3 /* setup a new stack at the end of the physical control page */ - lea 4096(%edi), %esp + lea PAGE_SIZE(%edi), %esp /* jump to identity mapped page */ movl %edi, %eax diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index e252c0ed9539..c2c8b9d6e241 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -159,7 +159,7 @@ relocate_new_kernel: movq %r9, %cr3 /* setup a new stack at the end of the physical control page */ - lea 4096(%r8), %rsp + lea PAGE_SIZE(%r8), %rsp /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 -- cgit v1.2.1 From fd3af53122e616c0ddba44a3da6d1c1877f72d29 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Sun, 23 Mar 2008 00:00:08 +0300 Subject: x86: relocate_kernel - use predefined macroses for processor state Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/relocate_kernel_32.S | 17 +++++++++-------- arch/x86/kernel/relocate_kernel_64.S | 32 +++++++++++--------------------- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index ce12bb8678ad..a7ecc8e0bc67 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -9,6 +9,7 @@ #include #include #include +#include /* * Must be relocatable PIC code callable as a C function @@ -167,16 +168,16 @@ identity_mapped: pushl %edx /* Set cr0 to a known state: - * 31 0 == Paging disabled - * 18 0 == Alignment check disabled - * 16 0 == Write protect disabled - * 3 0 == No task switch - * 2 0 == Don't do FP software emulation. - * 0 1 == Proctected mode enabled + * - Paging disabled + * - Alignment check disabled + * - Write protect disabled + * - No task switch + * - Don't do FP software emulation. + * - Proctected mode enabled */ movl %cr0, %eax - andl $~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax - orl $(1<<0), %eax + andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax + orl $(X86_CR0_PE), %eax movl %eax, %cr0 /* clear cr4 if applicable */ diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index c2c8b9d6e241..0c93a945b32e 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -9,6 +9,7 @@ #include #include #include +#include /* * Must be relocatable PIC code callable as a C function @@ -171,33 +172,22 @@ identity_mapped: pushq %rdx /* Set cr0 to a known state: - * 31 1 == Paging enabled - * 18 0 == Alignment check disabled - * 16 0 == Write protect disabled - * 3 0 == No task switch - * 2 0 == Don't do FP software emulation. 
- * 0 1 == Proctected mode enabled + * - Paging enabled + * - Alignment check disabled + * - Write protect disabled + * - No task switch + * - Don't do FP software emulation. + * - Proctected mode enabled */ movq %cr0, %rax - andq $~((1<<18)|(1<<16)|(1<<3)|(1<<2)), %rax - orl $((1<<31)|(1<<0)), %eax + andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax + orl $(X86_CR0_PG | X86_CR0_PE), %eax movq %rax, %cr0 /* Set cr4 to a known state: - * 10 0 == xmm exceptions disabled - * 9 0 == xmm registers instructions disabled - * 8 0 == performance monitoring counter disabled - * 7 0 == page global disabled - * 6 0 == machine check exceptions disabled - * 5 1 == physical address extension enabled - * 4 0 == page size extensions disabled - * 3 0 == Debug extensions disabled - * 2 0 == Time stamp disable (disabled) - * 1 0 == Protected mode virtual interrupts disabled - * 0 0 == VME disabled + * - physical address extension enabled */ - - movq $((1<<5)), %rax + movq $X86_CR4_PAE, %rax movq %rax, %cr4 jmp 1f -- cgit v1.2.1 From 366932deb335f0b84a08463c5c912bd42ac3397a Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Sun, 23 Mar 2008 00:00:09 +0300 Subject: x86: relocate_kernel - use predefined macroses for page attributes Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/relocate_kernel_32.S | 5 +++-- arch/x86/kernel/relocate_kernel_64.S | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index a7ecc8e0bc67..c30fe25d470d 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -10,14 +10,15 @@ #include #include #include +#include /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 2) -#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ -#define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */ +#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) +#define PAE_PGD_ATTR (_PAGE_PRESENT) .text .align PAGE_SIZE diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 0c93a945b32e..f5afe665a82b 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -10,13 +10,14 @@ #include #include #include +#include /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 3) -#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ +#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) .text .align PAGE_SIZE -- cgit v1.2.1 From 5524ea320d80e3ac6aeeec44216660831c76da08 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 11 Mar 2008 02:23:20 +0100 Subject: x86: don't set up early exception handlers for external interrupts All of early setup runs with interrupts disabled, so there is no need to set up early exception handlers for vectors >= 32 This saves some minor text size. 
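A rough sense of the text saved can be read off the trampoline array
declaration in the diff below; this is a back-of-the-envelope estimate
assuming nothing beyond the declarations shown in the patch:

    /*
     * Each early IDT stub is a 10-byte entry, per the declaration
     *   extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
     * so dropping vectors 32..255 removes roughly
     *   (256 - 32) * 10 = 2240 bytes of early trampoline code.
     */
    enum { early_idt_text_saved = (256 - 32) * 10 };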
Signed-off-by: Andi Kleen Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 2 +- arch/x86/kernel/head_64.S | 6 ++---- include/asm-x86/segment.h | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 48be76cda93b..d6d54faa84df 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -127,7 +127,7 @@ void __init x86_64_start_kernel(char * real_mode_data) /* Cleanup the over mapped high alias */ cleanup_highmap(); - for (i = 0; i < IDT_ENTRIES; i++) { + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { #ifdef CONFIG_EARLY_PRINTK set_intr_gate(i, &early_idt_handlers[i]); #else diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 017216916dff..2c0abe0e3c68 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -278,10 +278,8 @@ bad_address: .globl early_idt_handlers early_idt_handlers: - early_idt_tramp 0, 63 - early_idt_tramp 64, 127 - early_idt_tramp 128, 191 - early_idt_tramp 192, 255 + .set maxe, NUM_EXCEPTION_VECTORS-1 + early_idt_tramp 0, maxe #endif ENTRY(early_idt_handler) diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 23f0535fec61..ed5131dd7d92 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h @@ -191,13 +191,14 @@ #define SEGMENT_TI_MASK 0x4 #define IDT_ENTRIES 256 +#define NUM_EXCEPTION_VECTORS 32 #define GDT_SIZE (GDT_ENTRIES * 8) #define GDT_ENTRY_TLS_ENTRIES 3 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #ifdef __KERNEL__ #ifndef __ASSEMBLY__ -extern const char early_idt_handlers[IDT_ENTRIES][10]; +extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; #endif #endif -- cgit v1.2.1 From 749c970ae9fa43b4fcf17ac53022a953007d58f4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 11 Mar 2008 02:23:22 +0100 Subject: x86: replace early exception setup macro recursion with loop The early exception handlers are currently set up using a macro recursion. There is only one user left. Replace the macro with a standard loop in place. Noop patch, just a cleanup. [ tglx@linutronix.de: simplified ] Signed-off-by: Andi Kleen Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 2c0abe0e3c68..5e0391229502 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -268,18 +268,14 @@ bad_address: jmp bad_address #ifdef CONFIG_EARLY_PRINTK -.macro early_idt_tramp first, last - .ifgt \last-\first - early_idt_tramp \first, \last-1 - .endif - movl $\last,%esi - jmp early_idt_handler -.endm - .globl early_idt_handlers early_idt_handlers: - .set maxe, NUM_EXCEPTION_VECTORS-1 - early_idt_tramp 0, maxe + i = 0 + .rept NUM_EXCEPTION_VECTORS + movl $i, %esi + jmp early_idt_handler + i = i + 1 + .endr #endif ENTRY(early_idt_handler) -- cgit v1.2.1 From 41bd4eac748f39d7f3ed770fae3e595a747172bd Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 11 Mar 2008 02:23:21 +0100 Subject: x86: move early exception handlers into init.text Currently they are in .text.head because the rest of head_64.S. .text.head is not removed as init data, but the early exception handlers should be because they are not needed after early boot of the BP. So move them over. 
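For readers less familiar with the section machinery, the same placement is
what the __init annotation achieves on the C side; a minimal sketch, assuming
only the standard kernel init conventions:

    #include <linux/init.h>

    /*
     * __init places the function in .init.text, which the kernel
     * frees once boot is complete - the same lifetime the early
     * IDT trampolines get from the .section directive below.
     */
    static int __init early_only_setup(void)
    {
            return 0;   /* runs once during boot; memory reclaimed later */
    }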
Signed-off-by: Andi Kleen Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 5e0391229502..c1d7a877d814 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -267,6 +267,7 @@ ENTRY(secondary_startup_64) bad_address: jmp bad_address + .section ".init.text","ax" #ifdef CONFIG_EARLY_PRINTK .globl early_idt_handlers early_idt_handlers: @@ -321,6 +322,7 @@ early_idt_msg: early_idt_ripmsg: .asciz "RIP %s\n" #endif /* CONFIG_EARLY_PRINTK */ + .previous .balign PAGE_SIZE -- cgit v1.2.1 From 7d1116a92d709c22e7db910724c9fcd2001b0499 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:27 +0100 Subject: x86: implement true end_pfn_mapped for 32bit Even on 32bit 2MB pages can map more memory than is in the true max_low_pfn if end_pfn is not highmem and not aligned to 2MB. Add a end_pfn_map similar to x86-64 that accounts for this fact. This is important for code that really needs to know about all mapping aliases. Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 4 ++++ include/asm-x86/page.h | 4 +++- include/asm-x86/page_64.h | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 00168e65688a..73dd0601166a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -51,6 +51,8 @@ unsigned int __VMALLOC_RESERVE = 128 << 20; +unsigned long end_pfn_map; + DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); unsigned long highstart_pfn, highend_pfn; @@ -194,6 +196,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; + end_pfn_map = pfn; continue; } pte = one_page_table_init(pmd); @@ -208,6 +211,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) set_pte(pte, pfn_pte(pfn, prot)); } + end_pfn_map = pfn; } } } diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a05b2896492f..b734939916c4 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -36,7 +36,7 @@ #define max_pfn_mapped end_pfn_map #else #include -#define max_pfn_mapped max_low_pfn +#define max_pfn_mapped end_pfn_map #endif /* CONFIG_X86_64 */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) @@ -50,6 +50,8 @@ extern int page_is_ram(unsigned long pagenr); +extern unsigned long end_pfn_map; + struct page; static inline void clear_user_page(void *page, unsigned long vaddr, diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index f156778f707c..54d5db634858 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -59,7 +59,6 @@ void clear_page(void *page); void copy_page(void *to, void *from); extern unsigned long end_pfn; -extern unsigned long end_pfn_map; extern unsigned long phys_base; extern unsigned long __phys_addr(unsigned long); -- cgit v1.2.1 From 67794292c8615b05f46419ba8d4fd99e7c9a5db9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 21 Mar 2008 21:27:10 +0100 Subject: x86: replace the now useless max_pfn_mapped define Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/e820_64.c | 28 ++++++++++++++-------------- arch/x86/kernel/setup_64.c | 2 +- arch/x86/mm/init_32.c | 6 +++--- arch/x86/mm/init_64.c | 2 +- include/asm-x86/page.h | 4 +--- 6 files 
changed, 21 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 956b60f3ebd5..e277c370246d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -115,7 +115,7 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) if (!phys_addr || !size) return NULL; - if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE) + if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE) return __va(phys_addr); return NULL; diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index d6ada0833876..a720f3d5ed9d 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -36,11 +36,11 @@ struct e820map e820; unsigned long end_pfn; /* - * end_pfn only includes RAM, while end_pfn_map includes all e820 entries. - * The direct mapping extends to end_pfn_map, so that we can directly access + * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. + * The direct mapping extends to max_pfn_mapped, so that we can directly access * apertures, ACPI and other tables without having to play with fixmaps. */ -unsigned long end_pfn_map; +unsigned long max_pfn_mapped; /* * Last pfn which the user wants to use. @@ -281,16 +281,16 @@ unsigned long __init e820_end_of_ram(void) end_pfn = find_max_pfn_with_active_regions(); - if (end_pfn > end_pfn_map) - end_pfn_map = end_pfn; - if (end_pfn_map > MAXMEM>>PAGE_SHIFT) - end_pfn_map = MAXMEM>>PAGE_SHIFT; + if (end_pfn > max_pfn_mapped) + max_pfn_mapped = end_pfn; + if (max_pfn_mapped > MAXMEM>>PAGE_SHIFT) + max_pfn_mapped = MAXMEM>>PAGE_SHIFT; if (end_pfn > end_user_pfn) end_pfn = end_user_pfn; - if (end_pfn > end_pfn_map) - end_pfn = end_pfn_map; + if (end_pfn > max_pfn_mapped) + end_pfn = max_pfn_mapped; - printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map); + printk(KERN_INFO "max_pfn_mapped = %lu\n", max_pfn_mapped); return end_pfn; } @@ -366,9 +366,9 @@ static int __init e820_find_active_region(const struct e820entry *ei, if (*ei_startpfn >= *ei_endpfn) return 0; - /* Check if end_pfn_map should be updated */ - if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map) - end_pfn_map = *ei_endpfn; + /* Check if max_pfn_mapped should be updated */ + if (ei->type != E820_RAM && *ei_endpfn > max_pfn_mapped) + max_pfn_mapped = *ei_endpfn; /* Skip if map is outside the node */ if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || @@ -759,7 +759,7 @@ static int __init parse_memmap_opt(char *p) saved_max_pfn = e820_end_of_ram(); remove_all_active_ranges(); #endif - end_pfn_map = 0; + max_pfn_mapped = 0; e820.nr_map = 0; userdef = 1; return 0; diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index c6fe1e4bc7c2..413b8fc31545 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -347,7 +347,7 @@ void __init setup_arch(char **cmdline_p) check_efer(); - init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); + init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT)); if (efi_enabled) efi_init(); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 73dd0601166a..fc3ace2e88f1 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -51,7 +51,7 @@ unsigned int __VMALLOC_RESERVE = 128 << 20; -unsigned long end_pfn_map; +unsigned long max_pfn_mapped; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); unsigned long highstart_pfn, highend_pfn; @@ -196,7 +196,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; - 
end_pfn_map = pfn; + max_pfn_mapped = pfn; continue; } pte = one_page_table_init(pmd); @@ -211,7 +211,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) set_pte(pte, pfn_pte(pfn, prot)); } - end_pfn_map = pfn; + max_pfn_mapped = pfn; } } } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 210243e94d84..ef9e9cfb1fc2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -791,7 +791,7 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len) * This can happen with kdump kernels when accessing * firmware tables: */ - if (pfn < end_pfn_map) + if (pfn < max_pfn_mapped) return; printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n", diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index b734939916c4..6724a4bc6b7a 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h @@ -33,10 +33,8 @@ #ifdef CONFIG_X86_64 #include -#define max_pfn_mapped end_pfn_map #else #include -#define max_pfn_mapped end_pfn_map #endif /* CONFIG_X86_64 */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) @@ -50,7 +48,7 @@ extern int page_is_ram(unsigned long pagenr); -extern unsigned long end_pfn_map; +extern unsigned long max_pfn_mapped; struct page; -- cgit v1.2.1 From cc6150321903ca4c3bc9d53b0cdafb05d77d64d0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:28 +0100 Subject: x86: account overlapped mappings in max_pfn_mapped When end_pfn is not aligned to 2MB (or 1GB) then the kernel might map more memory than end_pfn. Account this in max_pfn_mapped. Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 2 +- arch/x86/mm/init_64.c | 34 +++++++++++++++++++++++----------- include/asm-x86/page_64.h | 3 +++ include/asm-x86/proto.h | 2 -- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 413b8fc31545..3d76dbd9f2c0 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -347,7 +347,7 @@ void __init setup_arch(char **cmdline_p) check_efer(); - init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT)); + max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT)); if (efi_enabled) efi_init(); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ef9e9cfb1fc2..1076097dcab2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -313,7 +313,7 @@ __meminit void early_iounmap(void *addr, unsigned long size) __flush_tlb_all(); } -static void __meminit +static unsigned long __meminit phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) { int i = pmd_index(address); @@ -335,21 +335,26 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) set_pte((pte_t *)pmd, pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); } + return address; } -static void __meminit +static unsigned long __meminit phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end) { pmd_t *pmd = pmd_offset(pud, 0); + unsigned long last_map_addr; + spin_lock(&init_mm.page_table_lock); - phys_pmd_init(pmd, address, end); + last_map_addr = phys_pmd_init(pmd, address, end); spin_unlock(&init_mm.page_table_lock); __flush_tlb_all(); + return last_map_addr; } -static void __meminit +static unsigned long __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) { + unsigned long last_map_addr = end; int i = pud_index(addr); for (; i < PTRS_PER_PUD; i++, addr = (addr & 
PUD_MASK) + PUD_SIZE) { @@ -368,13 +373,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) if (pud_val(*pud)) { if (!pud_large(*pud)) - phys_pmd_update(pud, addr, end); + last_map_addr = phys_pmd_update(pud, addr, end); continue; } if (direct_gbpages) { set_pte((pte_t *)pud, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); + last_map_addr = (addr & PUD_MASK) + PUD_SIZE; continue; } @@ -382,12 +388,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) spin_lock(&init_mm.page_table_lock); set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE)); - phys_pmd_init(pmd, addr, end); + last_map_addr = phys_pmd_init(pmd, addr, end); spin_unlock(&init_mm.page_table_lock); unmap_low_page(pmd); } __flush_tlb_all(); + + return last_map_addr >> PAGE_SHIFT; } static void __init find_early_table_space(unsigned long end) @@ -542,9 +550,9 @@ static void __init early_memtest(unsigned long start, unsigned long end) * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. */ -void __init_refok init_memory_mapping(unsigned long start, unsigned long end) +unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end) { - unsigned long next; + unsigned long next, last_map_addr = end; unsigned long start_phys = start, end_phys = end; printk(KERN_INFO "init_memory_mapping\n"); @@ -577,7 +585,7 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) next = start + PGDIR_SIZE; if (next > end) next = end; - phys_pud_init(pud, __pa(start), __pa(next)); + last_map_addr = phys_pud_init(pud, __pa(start), __pa(next)); if (!after_bootmem) set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys)); unmap_low_page(pud); @@ -593,6 +601,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) if (!after_bootmem) early_memtest(start_phys, end_phys); + + return last_map_addr; } #ifndef CONFIG_NUMA @@ -632,11 +642,13 @@ int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdat = NODE_DATA(nid); struct zone *zone = pgdat->node_zones + ZONE_NORMAL; - unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; - init_memory_mapping(start, start + size-1); + last_mapped_pfn = init_memory_mapping(start, start + size-1); + if (last_mapped_pfn > max_pfn_mapped) + max_pfn_mapped = last_mapped_pfn; ret = __add_pages(zone, start_pfn, nr_pages); WARN_ON(1); diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 54d5db634858..6ea72859c491 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h @@ -80,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t; #define vmemmap ((struct page *)VMEMMAP_START) +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); + #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 9da46af3b0e9..1e17bcce450e 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h @@ -7,8 +7,6 @@ extern void early_idt_handler(void); -extern void init_memory_mapping(unsigned long start, unsigned long end); - extern void system_call(void); extern void syscall_init(void); -- cgit v1.2.1 From c9caa02c529d5e113e40cbc77254558fcdfa4215 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:29 +0100 Subject: x86: add set_memory_4k to pageattr.c Add a new function to force split large 
pages into 4k pages. This is needed for some followup optimizations. I had to add a new field to cpa_data to pass down the information that try_preserve_large_page should not run. Right now no set_page_4k() because I didn't need it and all the specialized users I have in mind would be more comfortable with pure addresses. I also didn't export it because it's unlikely external code needs it. Signed-off-by: Andi Kleen Cc: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 20 ++++++++++++++++---- include/asm-x86/cacheflush.h | 1 + 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 270cab2e6030..7d9517abc9af 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -31,6 +31,7 @@ struct cpa_data { int numpages; int flushtlb; unsigned long pfn; + unsigned force_split : 1; }; #ifdef CONFIG_X86_64 @@ -262,6 +263,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, int i, do_split = 1; unsigned int level; + if (cpa->force_split) + return 1; + spin_lock_irqsave(&pgd_lock, flags); /* * Check for races, another CPU might have split this page @@ -696,7 +700,8 @@ static inline int cache_attr(pgprot_t attr) } static int change_page_attr_set_clr(unsigned long addr, int numpages, - pgprot_t mask_set, pgprot_t mask_clr) + pgprot_t mask_set, pgprot_t mask_clr, + int force_split) { struct cpa_data cpa; int ret, cache, checkalias; @@ -707,7 +712,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages, */ mask_set = canon_pgprot(mask_set); mask_clr = canon_pgprot(mask_clr); - if (!pgprot_val(mask_set) && !pgprot_val(mask_clr)) + if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) return 0; /* Ensure we are PAGE_SIZE aligned */ @@ -724,6 +729,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages, cpa.mask_set = mask_set; cpa.mask_clr = mask_clr; cpa.flushtlb = 0; + cpa.force_split = force_split; /* No alias checking for _NX bit modifications */ checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; @@ -762,13 +768,13 @@ out: static inline int change_page_attr_set(unsigned long addr, int numpages, pgprot_t mask) { - return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0)); + return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0); } static inline int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t mask) { - return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask); + return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0); } int _set_memory_uc(unsigned long addr, int numpages) @@ -847,6 +853,12 @@ int set_memory_np(unsigned long addr, int numpages) return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT)); } +int set_memory_4k(unsigned long addr, int numpages) +{ + return change_page_attr_set_clr(addr, numpages, __pgprot(0), + __pgprot(0), 1); +} + int set_pages_uc(struct page *page, int numpages) { unsigned long addr = (unsigned long)page_address(page); diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 7ab5b520b7bd..cb1d6f8fd003 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -45,6 +45,7 @@ int set_memory_nx(unsigned long addr, int numpages); int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); +int set_memory_4k(unsigned long addr, int 
numpages); void clflush_cache_range(void *addr, unsigned int size); -- cgit v1.2.1 From f5c24a7fd0798d636af184cc7032e7e0cb149112 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 12 Mar 2008 03:53:30 +0100 Subject: x86: don't use large pages to map the first 2/4MB of memory Intel recommends to not use large pages for the first 1MB of the physical memory because there are fixed size MTRRs there which cause splitups in the TLBs. On AMD doing so is also a good idea. The implementation is a little different between 32bit and 64bit. On 32bit I just taught the initial page table set up about this because it was very simple to do. This also has the advantage that the risk of a prefetch ever seeing the page even if it only exists for a short time is minimized. On 64bit that is not quite possible, so use set_memory_4k() a little later (in check_bugs) instead. Signed-off-by: Andi Kleen Acked-by: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/bugs_64.c | 12 ++++++++++++ arch/x86/mm/init_32.c | 7 ++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c index 60207e999a04..9a3ed0649d4e 100644 --- a/arch/x86/kernel/bugs_64.c +++ b/arch/x86/kernel/bugs_64.c @@ -9,6 +9,7 @@ #include #include #include +#include void __init check_bugs(void) { @@ -18,4 +19,15 @@ void __init check_bugs(void) print_cpu_info(&boot_cpu_data); #endif alternative_instructions(); + + /* + * Make sure the first 2MB area is not mapped by huge pages + * There are typically fixed size MTRRs in there and overlapping + * MTRRs into large pages causes slow downs. + * + * Right now we don't do that with gbpages because there seems + * very little benefit for that case. + */ + if (!direct_gbpages) + set_memory_4k((unsigned long)__va(0), 1); } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index fc3ace2e88f1..1500dc8d63e4 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -181,8 +181,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) /* * Map with big pages if possible, otherwise * create normal page tables: + * + * Don't use a large page for the first 2/4MB of memory + * because there are often fixed size MTRRs in there + * and overlapping MTRRs into large pages can cause + * slowdowns. */ - if (cpu_has_pse) { + if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; -- cgit v1.2.1 From 1de87bd40e119d26533b5135677901990390bfa9 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 22 Mar 2008 10:59:28 +0100 Subject: x86: re-add rdmsrl_safe RDMSR for 64bit values with exception handling. Makes it easier to deal with 64bit valued MSRs. The old 64bit code base had that too as checking_rdmsrl(), but it got dropped somehow. 
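A hedged usage sketch (this example is illustrative, not from the patch;
MSR_K8_TSEG_ADDR is the real consumer added later in this series):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <asm/msr.h>

    /* Probe an MSR that may not exist on every CPU: rdmsrl_safe()
     * traps the #GP fault and returns nonzero instead of oopsing. */
    static void __cpuinit probe_tseg(void)
    {
            unsigned long long tseg;

            if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg))
                    printk(KERN_INFO "TSEG base: 0x%llx\n", tseg);
    }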
Signed-off-by: Andi Kleen
Cc: andreas.herrmann3@amd.com
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/asm-x86/msr.h      | 8 ++++++++
 include/asm-x86/paravirt.h | 7 +++++++
 2 files changed, 15 insertions(+)

diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2c698a2e81f9..3707650a169b 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -150,6 +150,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 	__err;							\
 })

+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_safe(msr, &err);
+	return err;
+}
+
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())

diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0c23f7940bc4..3d419398499b 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -693,6 +693,13 @@ do {						\
 	_err;					\
 })

+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr(msr, &err);
+	return err;
+}

 static inline u64 paravirt_read_tsc(void)
 {
--
cgit v1.2.1

From 8346ea17aa20e9864b0f7dc03d55f3cd5620b8c1 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 12 Mar 2008 03:53:32 +0100
Subject: x86: split large page mapping for AMD TSEG

On AMD SMM protected memory is part of the address map, but handled
internally like an MTRR. That leads to large pages getting split
internally which has some performance implications. Check for the AMD
TSEG MSR and split the large page mapping on that area explicitly if
it is part of the direct mapping.

There is also SMM ASEG, but it is in the first 1MB and already covered
by the earlier split first page patch.

Idea for this came from an earlier patch by Andreas Herrmann.

On a RevF dual Socket Opteron system kernbench shows a clear
improvement from this (together with the earlier patches in this
series, especially the split first 2MB patch):

[lower is better]

               no split  stddev       split    stddev     delta
Elapsed Time    87.146   (0.727516)    84.296  (1.09098)  -3.2%
User Time      274.537   (4.05226)    273.692  (3.34344)  -0.3%
System Time     34.907   (0.42492)     34.508  (0.26832)  -1.1%
Percent CPU    322.5     (38.3007)    326.5    (44.5128)  +1.2%

=> About 3.2% improvement in elapsed time for kernbench.

With GB pages on AMD Fam10h the impact of splitting is much higher of
course, since it would split two full GB pages (together with the
first 1MB split patch) instead of two 2MB pages. I could not benchmark
a clear difference in kernbench on gbpages, so I kept it disabled for
that case.

That was only limited benchmarking of course, so if someone was
interested in running more tests for the gbpages case that could be
revisited (contributions welcome).

I didn't bother implementing this for 32bit because it is very
unlikely the 32bit lowmem mapping overlaps into the TSEG near 4GB and
the 2MB low split is already handled for both.

[ mingo@elte.hu: do it on gbpages kernels too, there's no clear
  reason why it shouldn't help there.
] Signed-off-by: Andi Kleen Acked-by: andreas.herrmann3@amd.com Cc: mingo@elte.hu Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 13 +++++++++++++ include/asm-x86/msr-index.h | 1 + 2 files changed, 14 insertions(+) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 3d76dbd9f2c0..b5425979501c 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -729,6 +729,19 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) if (amd_apic_timer_broken()) disable_apic_timer = 1; + + if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { + unsigned long long tseg; + + /* + * Split up direct mapping around the TSEG SMM area. + * Don't do it for gbpages because there seems very little + * benefit in doing so. + */ + if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) && + (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT))) + set_memory_4k((unsigned long)__va(tseg), 1); + } } void __cpuinit detect_ht(struct cpuinfo_x86 *c) diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index af4e07f661b8..09413ad39d3c 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h @@ -112,6 +112,7 @@ #define MSR_K8_SYSCFG 0xc0010010 #define MSR_K8_HWCR 0xc0010015 #define MSR_K8_ENABLE_C1E 0xc0010055 +#define MSR_K8_TSEG_ADDR 0xc0010112 #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ -- cgit v1.2.1 From 5af5573ee06c361378e22a9dd71dae0320e841f7 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 25 Mar 2008 13:28:56 -0300 Subject: x86: move ipi definitions to mach_ipi.h take them out of the x86_64-only asm/mach_apic.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 2 ++ arch/x86/kernel/crash.c | 4 ---- arch/x86/kernel/io_apic_64.c | 2 ++ arch/x86/kernel/smp.c | 6 +----- arch/x86/kernel/tlb_64.c | 3 ++- include/asm-x86/mach-default/mach_ipi.h | 10 ++++++++++ include/asm-x86/mach_apic.h | 3 --- 7 files changed, 17 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 5362cfd30ecd..206278f1c6f4 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -41,6 +41,8 @@ #include #include +#include + int disable_apic_timer __cpuinitdata; static int apic_calibrate_pmtmr __initdata; int disable_apic; diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 9a5fa0abfcc7..2251d0ae9570 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -26,11 +26,7 @@ #include #include -#ifdef CONFIG_X86_32 #include -#else -#include -#endif /* This keeps a track of which one is crashing cpu. 
*/ static int crashing_cpu; diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 1627c0d53e0b..7d5cdf320eba 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -50,6 +50,8 @@ #include #include +#include + struct irq_cfg { cpumask_t domain; cpumask_t old_domain; diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 16c52aaaca35..8f75893a6467 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -26,12 +26,8 @@ #include #include #include -#ifdef CONFIG_X86_32 -#include #include -#else -#include -#endif +#include /* * Some notes on x86 processor bugs affecting SMP operation: * diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 615d84817758..1558e513757e 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -11,11 +11,12 @@ #include #include #include -#include #include #include #include #include + +#include /* * Smarter SMP flushing macros. * c/o Linus Torvalds. diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h index 0dba244c86db..be323364e68f 100644 --- a/include/asm-x86/mach-default/mach_ipi.h +++ b/include/asm-x86/mach-default/mach_ipi.h @@ -9,10 +9,15 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; +#ifdef CONFIG_X86_64 +#include +#define send_IPI_mask (genapic->send_IPI_mask) +#else static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_bitmask(mask, vector); } +#endif static inline void __local_send_IPI_allbutself(int vector) { @@ -33,6 +38,10 @@ static inline void __local_send_IPI_all(int vector) __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } +#ifdef CONFIG_X86_64 +#define send_IPI_allbutself (genapic->send_IPI_allbutself) +#define send_IPI_all (genapic->send_IPI_all) +#else static inline void send_IPI_allbutself(int vector) { /* @@ -50,5 +59,6 @@ static inline void send_IPI_all(int vector) { __local_send_IPI_all(vector); } +#endif #endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h index 7b7115a0c1c9..1bc68c0c0cdf 100644 --- a/include/asm-x86/mach_apic.h +++ b/include/asm-x86/mach_apic.h @@ -20,9 +20,6 @@ #define vector_allocation_domain (genapic->vector_allocation_domain) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) -#define send_IPI_mask (genapic->send_IPI_mask) -#define send_IPI_allbutself (genapic->send_IPI_allbutself) -#define send_IPI_all (genapic->send_IPI_all) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) #define phys_pkg_id (genapic->phys_pkg_id) -- cgit v1.2.1 From 756a6c68556600aec9460346332884d891d5beb4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 25 Mar 2008 08:31:17 +0100 Subject: x86: ioremap of 64-bit resource on 32-bit kernel fix Signed-off-by: Ingo Molnar --- arch/x86/mm/ioremap.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 7338c5d3dd37..c590fd200e29 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -47,7 +47,7 @@ static inline int phys_addr_valid(unsigned long addr) int page_is_ram(unsigned long pagenr) { - unsigned long addr, end; + resource_size_t addr, end; int i; /* @@ -120,7 +120,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { - unsigned long pfn, offset, last_addr, vaddr; + unsigned long pfn, 
offset, vaddr; + resource_size_t last_addr; struct vm_struct *area; unsigned long new_prot_val; pgprot_t prot; -- cgit v1.2.1 From ab68ed98f665436601feec853c8f400d28c39e92 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Tue, 25 Mar 2008 22:16:32 +0300 Subject: x86: entry_32.S - use flags from processor-flags.h By including processor-flags.h we are allowed to use predefined macroses instead of keeping own ones Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index a664d5726d8d..9ba49a26dff8 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -51,6 +51,7 @@ #include #include #include +#include #include "irq_vectors.h" /* @@ -68,13 +69,6 @@ #define nr_syscalls ((syscall_table_size)/4) -CF_MASK = 0x00000001 -TF_MASK = 0x00000100 -IF_MASK = 0x00000200 -DF_MASK = 0x00000400 -NT_MASK = 0x00004000 -VM_MASK = 0x00020000 - #ifdef CONFIG_PREEMPT #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF #else @@ -84,7 +78,7 @@ VM_MASK = 0x00020000 .macro TRACE_IRQS_IRET #ifdef CONFIG_TRACE_IRQFLAGS - testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off? + testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? jz 1f TRACE_IRQS_ON 1: @@ -246,7 +240,7 @@ ret_from_intr: check_userspace: movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al - andl $(VM_MASK | SEGMENT_RPL_MASK), %eax + andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax cmpl $USER_RPL, %eax jb resume_kernel # not returning to v8086 or userspace @@ -271,7 +265,7 @@ need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl jz restore_all - testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ? + testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all call preempt_schedule_irq jmp need_resched @@ -388,7 +382,7 @@ syscall_exit: # setting need_resched or sigpending # between sampling and the iret TRACE_IRQS_OFF - testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit + testl $X86_EFLAGS_TF,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit jz no_singlestep orl $_TIF_SINGLESTEP,TI_flags(%ebp) no_singlestep: @@ -403,7 +397,7 @@ restore_all: # See comments in process.c:copy_thread() for details. 
movb PT_OLDSS(%esp), %ah movb PT_CS(%esp), %al - andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax + andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax CFI_REMEMBER_STATE je ldt_ss # returning to user-space with LDT SS @@ -490,7 +484,7 @@ work_resched: work_notifysig: # deal with pending signals and # notify-resume requests #ifdef CONFIG_VM86 - testl $VM_MASK, PT_EFLAGS(%esp) + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) movl %esp, %eax jne work_notifysig_v86 # returning to kernel-space or # vm86-space -- cgit v1.2.1 From dd46e3ca73d136aa7f9f1813e4cbb6934c3611cc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 25 Mar 2008 18:10:46 -0300 Subject: x86: move apic declarations to mach_apic.h take them out of the x86_64-specific asm/mach_apic.h Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/io_apic_64.c | 2 +- arch/x86/kernel/setup_64.c | 2 +- include/asm-x86/mach-default/mach_apic.h | 83 ++++++++++++++++++-------------- include/asm-x86/mach_apic.h | 26 ---------- 6 files changed, 50 insertions(+), 67 deletions(-) delete mode 100644 include/asm-x86/mach_apic.h diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 206278f1c6f4..7dd6250aaf6c 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -42,6 +41,7 @@ #include #include +#include int disable_apic_timer __cpuinitdata; static int apic_calibrate_pmtmr __initdata; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 33d38f8305ee..0173065dc3b7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -4,8 +4,8 @@ #include #include #include -#include +#include #include "cpu.h" /* diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 7d5cdf320eba..6dd33628f28a 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -51,6 +50,7 @@ #include #include +#include struct irq_cfg { cpumask_t domain; diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index b5425979501c..540686be35d0 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include #include @@ -67,6 +66,7 @@ #include #include +#include #ifdef CONFIG_PARAVIRT #include #else diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 13900e8cc1ab..1f56e7d5bfdd 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -1,6 +1,8 @@ #ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H +#ifdef CONFIG_X86_LOCAL_APIC + #include #include @@ -14,24 +16,25 @@ static inline cpumask_t target_cpus(void) return cpumask_of_cpu(0); #endif } -#define TARGET_CPUS (target_cpus()) #define NO_BALANCE_IRQ (0) #define esr_disable (0) +#ifdef CONFIG_X86_64 +#include +#define INT_DELIVERY_MODE (genapic->int_delivery_mode) +#define INT_DEST_MODE (genapic->int_dest_mode) +#define TARGET_CPUS (genapic->target_cpus()) +#define apic_id_registered (genapic->apic_id_registered) +#define init_apic_ldr (genapic->init_apic_ldr) +#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) +#define phys_pkg_id (genapic->phys_pkg_id) +#define vector_allocation_domain 
(genapic->vector_allocation_domain) +extern void setup_apic_routing(void); +#else #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - +#define TARGET_CPUS (target_cpus()) /* * Set up the logical destination ID. * @@ -49,32 +52,52 @@ static inline void init_apic_ldr(void) apic_write_around(APIC_LDR, val); } -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +static inline int apic_id_registered(void) { - return phys_map; + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); +} + +static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +{ + return cpus_addr(cpumask)[0]; +} + +static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +{ + return cpuid_apic >> index_msb; } -#ifdef CONFIG_X86_64 -extern void setup_apic_routing(void); -#else static inline void setup_apic_routing(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "Flat", nr_ioapics); } -#endif -static inline int multi_timer_check(int apic, int irq) +static inline int apicid_to_node(int logical_apicid) { return 0; } +#endif -#ifdef CONFIG_X86_32 -static inline int apicid_to_node(int logical_apicid) +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long check_apicid_present(int bit) +{ + return physid_isset(bit, phys_cpu_present_map); +} + +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +{ + return phys_map; +} + +static inline int multi_timer_check(int apic, int irq) { return 0; } -#endif /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) @@ -109,23 +132,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); } -static inline int apic_id_registered(void) -{ - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return cpus_addr(cpumask)[0]; -} - static inline void enable_apic_mode(void) { } -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - +#endif /* CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h deleted file mode 100644 index 1bc68c0c0cdf..000000000000 --- a/include/asm-x86/mach_apic.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -/* - * Copyright 2004 James Cleverdon, IBM. - * Subject to the GNU Public License, v.2 - * - * Generic APIC sub-arch defines. - * - * Hacked for x86-64 by James Cleverdon from i386 architecture code by - * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and - * James Cleverdon. 
- */ - -#include - -#define INT_DELIVERY_MODE (genapic->int_delivery_mode) -#define INT_DEST_MODE (genapic->int_dest_mode) -#define TARGET_CPUS (genapic->target_cpus()) -#define vector_allocation_domain (genapic->vector_allocation_domain) -#define apic_id_registered (genapic->apic_id_registered) -#define init_apic_ldr (genapic->init_apic_ldr) -#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) -#define phys_pkg_id (genapic->phys_pkg_id) - -#endif /* __ASM_MACH_APIC_H */ -- cgit v1.2.1 From 537e33136443bcd53ee13bc32a8f0fa46b1f3fdb Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Wed, 26 Mar 2008 02:16:15 +0100 Subject: x86 floppy: kill off the 'register' keyword from header When compilers became generally better at optimizing code than humans, the register keyword became mostly useless. For the floppy driver it certainly is since it's so slow compared to the rest of the system that optimizing access to a single variable or two isn't going to make any real difference So let's just leave it to the compiler - it'll do a better job anyway. This patch does away with a few register keywords in the x86 floppy driver. Signed-off-by: Jesper Juhl Signed-off-by: Ingo Molnar --- include/asm-x86/floppy.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index 438b3033a250..dbe82a5c5eac 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h @@ -53,7 +53,7 @@ static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id) { - register unsigned char st; + unsigned char st; #undef TRACE_FLPY_INT @@ -71,8 +71,8 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) #endif { - register int lcount; - register char *lptr; + int lcount; + char *lptr; st = 1; for (lcount = virtual_dma_count, lptr = virtual_dma_addr; -- cgit v1.2.1 From aa040b2f0693695ae393cd9b8a93055952dbf76f Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 22 Mar 2008 13:27:38 -0700 Subject: x86: simplify sync_test_bit(), improve Using a naked parameterless macro could lead to other tokens being unexpectedly replaced. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- include/asm-x86/sync_bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index f1078a5e4ed7..b47a1d0b8a83 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h @@ -123,7 +123,7 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) return oldbit; } -#define sync_test_bit test_bit +#define sync_test_bit(nr, addr) test_bit(nr, addr) #undef ADDR -- cgit v1.2.1 From 0e03eb86b51b21054aea01ada1d03e9c2265dd20 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 26 Mar 2008 12:09:16 -0400 Subject: x86: Centaur Isaiah processor to use sysenter in 64-bit compatibility mode rather than syscall Upcoming 64 bit processors from Centaur can use sysenter. 
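The two vendor checks added to syscall32_cpu_init() below could equally be
folded into a single predicate; a sketch of that equivalent form (the patch
itself keeps two separate ifs, which keeps per-vendor cases easy to extend):

    /* Equivalent single-expression form of the check (sketch only): */
    if (use_sysenter < 0)
            use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
                            boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR);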
Signed-off-by: Dave Jones Signed-off-by: Jesse Ahrens Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_64.c | 35 +++++++++++++++++++++++++++++++++++ arch/x86/vdso/vdso32-setup.c | 8 ++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index 540686be35d0..b80300710c08 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -885,6 +885,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) srat_detect_node(); } +static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) +{ + if (c->x86 == 0x6 && c->x86_model >= 0xf) + set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); +} + +static void __cpuinit init_centaur(struct cpuinfo_x86 *c) +{ + /* Cache sizes */ + unsigned n; + + n = c->extended_cpuid_level; + if (n >= 0x80000008) { + unsigned eax = cpuid_eax(0x80000008); + c->x86_virt_bits = (eax >> 8) & 0xff; + c->x86_phys_bits = eax & 0xff; + } + + if (c->x86 == 0x6 && c->x86_model >= 0xf) { + c->x86_cache_alignment = c->x86_clflush_size * 2; + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_REP_GOOD); + } + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); +} + static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; @@ -893,6 +919,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) c->x86_vendor = X86_VENDOR_AMD; else if (!strcmp(v, "GenuineIntel")) c->x86_vendor = X86_VENDOR_INTEL; + else if (!strcmp(v, "CentaurHauls")) + c->x86_vendor = X86_VENDOR_CENTAUR; else c->x86_vendor = X86_VENDOR_UNKNOWN; } @@ -989,6 +1017,9 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) set_cpu_cap(c, X86_FEATURE_PAT); break; + case X86_VENDOR_CENTAUR: + early_init_centaur(c); + break; } } @@ -1025,6 +1056,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) init_intel(c); break; + case X86_VENDOR_CENTAUR: + init_centaur(c); + break; + case X86_VENDOR_UNKNOWN: default: display_cacheinfo(c); diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index f7e78d84fc01..e2af8eee80e3 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -210,8 +210,12 @@ static int use_sysenter __read_mostly = -1; /* May not be __init: called during resume */ void syscall32_cpu_init(void) { - if (use_sysenter < 0) - use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL); + if (use_sysenter < 0) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + use_sysenter = 1; + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + use_sysenter = 1; + } /* Load these always in case some future AMD CPU supports SYSENTER from compat mode too. */ -- cgit v1.2.1 From 7219bebd72726c13c1eaaa3ade0e829e998fb3b1 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 17 Apr 2008 17:41:31 +0200 Subject: x86: add comments to describe the new api's in cacheflush.h The new cacheflush.h API's didn't have any comments describing how they're to be used yet and the conventions around these functions. This patch adds comments to this effect; in order for that to be a logical series, some prototypes had to move around. 
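A hedged usage sketch of the convention these comments document (the buffer
and page count are hypothetical):

    #include <asm/cacheflush.h>

    static void make_buffer_ro(void *buf, int numpages)
    {
            /* preferred: operate on the virtual address you have */
            set_memory_ro((unsigned long)buf, numpages);

            /*
             * deprecated style the new comments warn against:
             *   set_pages_ro(virt_to_page(buf), numpages);
             */
    }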
Signed-off-by: Arjan van de Ven
Signed-off-by: Ingo Molnar
---
 include/asm-x86/cacheflush.h | 63 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 8 deletions(-)

diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index cb1d6f8fd003..f4c0ab50d2c2 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,15 +24,34 @@
 #define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
 	memcpy((dst), (src), (len))

-int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
-					      pgprot_t prot);
-int set_pages_uc(struct page *page, int numpages);
-int set_pages_wb(struct page *page, int numpages);
-int set_pages_x(struct page *page, int numpages);
-int set_pages_nx(struct page *page, int numpages);
-int set_pages_ro(struct page *page, int numpages);
-int set_pages_rw(struct page *page, int numpages);
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability  : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write    : ReadOnly, ReadWrite
+ * Presence      : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ *   coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ *   operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee that mappings other than the requested one are
+ *   in any state, other than that these do not violate rules for
+ *   the CPU you have. Do not depend on any effects on other mappings,
+ *   CPUs other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */

 int _set_memory_uc(unsigned long addr, int numpages);
 int _set_memory_wc(unsigned long addr, int numpages);
@@ -47,6 +66,34 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);

+/*
+ * For legacy compatibility with the old APIs, a few functions
+ * are provided that work on a "struct page".
+ * These functions operate ONLY on the 1:1 kernel mapping of the
+ * memory that the struct page represents, and internally just
+ * call the set_memory_* function. See the description of the
+ * set_memory_* function for more details on conventions.
+ *
+ * These APIs should be considered *deprecated* and are likely going to
+ * be removed in the future.
+ * The reason for this is the implicit operation on the 1:1 mapping only,
+ * making this not a generally useful API.
+ *
+ * Specifically, many users of the old APIs had a virtual address,
+ * called virt_to_page() or vmalloc_to_page() on that address to
+ * get a struct page* that the old API required.
+ * To convert these cases, use set_memory_*() on the original
+ * virtual address, do not use these functions.
+ */ + +int set_pages_uc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); +int set_pages_x(struct page *page, int numpages); +int set_pages_nx(struct page *page, int numpages); +int set_pages_ro(struct page *page, int numpages); +int set_pages_rw(struct page *page, int numpages); + + void clflush_cache_range(void *addr, unsigned int size); void cpa_init(void); -- cgit v1.2.1 From 2df297261903249f5ac2d3d14ededbda229397e2 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:53:54 +0300 Subject: x86: move es7000_plat closer to its user Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 1b225cea5d7b..4cc325edc70d 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -867,8 +867,6 @@ void __init find_smp_config (void) smp_scan_config(address, 0x400); } -int es7000_plat; - /* -------------------------------------------------------------------------- ACPI-based MP Configuration -------------------------------------------------------------------------- */ @@ -1029,6 +1027,8 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) panic("Max # of irq sources exceeded!\n"); } +int es7000_plat; + void __init mp_config_acpi_legacy_irqs (void) { struct mpc_config_intsrc intsrc; -- cgit v1.2.1 From 987dd2d4d465e80e00d6a0b16787a78aa75dd66a Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:06 +0300 Subject: x86: don't call MP_processor_info for disabled cpu Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 4cc325edc70d..c487bc99d7a3 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -895,6 +895,10 @@ void __cpuinit mp_register_lapic (u8 id, u8 enabled) id, MAX_APICS); return; } + if (!enabled) { + ++disabled_cpus; + return; + } if (id == boot_cpu_physical_apicid) boot_cpu = 1; -- cgit v1.2.1 From c853c67690448415af2d204062028d1456f524de Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:13 +0300 Subject: x86: separate generic_processor_info into its own function Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 158 ++++++++++++++++++++++--------------------- 1 file changed, 82 insertions(+), 76 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index c487bc99d7a3..b0aed978eef1 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -109,12 +109,91 @@ static int mpc_record; static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata; #endif -static void __cpuinit MP_processor_info (struct mpc_config_processor *m) +static void __cpuinit generic_processor_info(int apicid, int version) { - int ver, apicid, cpu; + int cpu; cpumask_t tmp_map; physid_mask_t phys_cpu; + + /* + * Validate version + */ + if (version == 0x0) { + printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " + "fixing up to 0x10. 
(tell your hw vendor)\n", + version); + version = 0x10; + } + apic_version[apicid] = version; + + phys_cpu = apicid_to_cpu_present(apicid); + physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); + + if (num_processors >= NR_CPUS) { + printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." + " Processor ignored.\n", NR_CPUS); + return; + } + + if (num_processors >= maxcpus) { + printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." + " Processor ignored.\n", maxcpus); + return; + } + + num_processors++; + cpus_complement(tmp_map, cpu_present_map); + cpu = first_cpu(tmp_map); + + if (apicid == boot_cpu_physical_apicid) + /* + * x86_bios_cpu_apicid is required to have processors listed + * in same order as logical cpu numbers. Hence the first + * entry is BSP, and so on. + */ + cpu = 0; + + /* + * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y + * but we need to work other dependencies like SMP_SUSPEND etc + * before this can be done without some confusion. + * if (CPU_HOTPLUG_ENABLED || num_processors > 8) + * - Ashok Raj + */ + if (num_processors > 8) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(version)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } +#ifdef CONFIG_SMP + /* are we being called early in kernel startup? */ + if (x86_cpu_to_apicid_early_ptr) { + u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; + u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + + cpu_to_apicid[cpu] = apicid; + bios_cpu_apicid[cpu] = apicid; + } else { + per_cpu(x86_cpu_to_apicid, cpu) = apicid; + per_cpu(x86_bios_cpu_apicid, cpu) = apicid; + } +#endif + cpu_set(cpu, cpu_possible_map); + cpu_set(cpu, cpu_present_map); +} + +static void __cpuinit MP_processor_info(struct mpc_config_processor *m) +{ + int apicid; + if (!(m->mpc_cpuflag & CPU_ENABLED)) { disabled_cpus++; return; @@ -184,80 +263,7 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m) boot_cpu_physical_apicid = m->mpc_apicid; } - ver = m->mpc_apicver; - - /* - * Validate version - */ - if (ver == 0x0) { - printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - m->mpc_apicid); - ver = 0x10; - } - apic_version[m->mpc_apicid] = ver; - - phys_cpu = apicid_to_cpu_present(apicid); - physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); - - if (num_processors >= NR_CPUS) { - printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." - " Processor ignored.\n", NR_CPUS); - return; - } - - if (num_processors >= maxcpus) { - printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." - " Processor ignored.\n", maxcpus); - return; - } - - num_processors++; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); - - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) - /* - * x86_bios_cpu_apicid is required to have processors listed - * in same order as logical cpu numbers. Hence the first - * entry is BSP, and so on. - */ - cpu = 0; - - /* - * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y - * but we need to work other dependencies like SMP_SUSPEND etc - * before this can be done without some confusion. 
- * if (CPU_HOTPLUG_ENABLED || num_processors > 8) - * - Ashok Raj - */ - if (num_processors > 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(ver)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } - } -#ifdef CONFIG_SMP - /* are we being called early in kernel startup? */ - if (x86_cpu_to_apicid_early_ptr) { - u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; - u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; - - cpu_to_apicid[cpu] = m->mpc_apicid; - bios_cpu_apicid[cpu] = m->mpc_apicid; - } else { - per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; - per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; - } -#endif - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); + generic_processor_info(apicid, m->mpc_apicver); } static void __init MP_bus_info (struct mpc_config_bus *m) -- cgit v1.2.1 From 08bef9d337f26747b9520278872d20f15983fcda Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:20 +0300 Subject: x86: don't use MP_processor_info for ACPI mode Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index b0aed978eef1..9f23018190c5 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -114,7 +114,6 @@ static void __cpuinit generic_processor_info(int apicid, int version) int cpu; cpumask_t tmp_map; physid_mask_t phys_cpu; - /* * Validate version @@ -893,34 +892,18 @@ void __init mp_register_lapic_address(u64 address) void __cpuinit mp_register_lapic (u8 id, u8 enabled) { - struct mpc_config_processor processor; - int boot_cpu = 0; - if (MAX_APICS - id <= 0) { printk(KERN_WARNING "Processor #%d invalid (max %d)\n", id, MAX_APICS); return; } + if (!enabled) { ++disabled_cpus; return; } - if (id == boot_cpu_physical_apicid) - boot_cpu = 1; - - processor.mpc_type = MP_PROCESSOR; - processor.mpc_apicid = id; - processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); - processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); - processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); - processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; - processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; - - MP_processor_info(&processor); + generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR))); } #ifdef CONFIG_X86_IO_APIC -- cgit v1.2.1 From e81b2c62d66068d210ddeacd77076068184d414a Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:31 +0300 Subject: x86: move apic_ver array to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 3 +++ arch/x86/kernel/mpparse_32.c | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 6f506020bd7d..bdfffb091d12 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1225,6 +1225,9 @@ fake_ioapic_page: * This initializes the IO-APIC and APIC hardware if this is * a UP kernel. 
*/ + +int apic_version[MAX_APICS]; + int __init APIC_init_uniprocessor(void) { if (enable_local_apic < 0) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 9f23018190c5..18882697987e 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -41,7 +41,6 @@ unsigned int __cpuinitdata maxcpus = NR_CPUS; * Various Linux-internal data structures created from the * MP-table. */ -int apic_version [MAX_APICS]; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) int mp_bus_id_to_type [MAX_MP_BUSSES]; #endif -- cgit v1.2.1 From 8f6e2ca9f862cb3738ad83fb18c572d8a59c0849 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:38 +0300 Subject: x86: move mp_lapic_addr to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 2 ++ arch/x86/kernel/mpparse_32.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index bdfffb091d12..68ea75fa6d35 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -50,6 +50,8 @@ # error SPURIOUS_APIC_VECTOR definition error #endif +unsigned long mp_lapic_addr; + /* * Knob to control our willingness to enable the local APIC. * diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 18882697987e..728aa9900934 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -60,7 +60,6 @@ int mp_irq_entries; int nr_ioapics; int pic_mode; -unsigned long mp_lapic_addr; unsigned int def_to_bigsmp = 0; -- cgit v1.2.1 From 40014bace17ba393409fd8a4915a87e43687aac8 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:44 +0300 Subject: x86: move phys_cpu_present_map to smpboot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 +++- arch/x86/kernel/smpboot.c | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 728aa9900934..f7eceabc7da9 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -70,8 +70,10 @@ unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; -/* Bitmask of physically existing CPUs */ +/* Make it easy to share the UP and SMP code: */ +#ifndef CONFIG_X86_SMP physid_mask_t phys_cpu_present_map; +#endif #ifndef CONFIG_SMP DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 61b9a5b6fc07..8b6eefd9e906 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -85,6 +85,10 @@ u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata void *x86_bios_cpu_apicid_early_ptr; DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + +/* Bitmask of physically existing CPUs */ +physid_mask_t phys_cpu_present_map; + u8 apicid_2_node[MAX_APICID]; #endif -- cgit v1.2.1 From 2bb9e9d7c1b03454665cd99f7d73e67139cdf2e6 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:50 +0300 Subject: x86: move num_processors to smpboot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 5 ++++- arch/x86/kernel/smpboot.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index f7eceabc7da9..4d810b0384a5 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -65,8 
+65,11 @@ unsigned int def_to_bigsmp = 0; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; -/* Internal processor count */ + +/* Make it easy to share the UP and SMP code: */ +#ifndef CONFIG_X86_SMP unsigned int num_processors; +#endif unsigned disabled_cpus __cpuinitdata; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b6eefd9e906..e1288c2626f8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -86,6 +86,9 @@ void *x86_bios_cpu_apicid_early_ptr; DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); +/* Internal processor count */ +unsigned int num_processors; + /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -- cgit v1.2.1 From 53c4c793b30bbf6e1a25cab61790b18f205dd365 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:54:57 +0300 Subject: x86: move disabled_cpus to smpboot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 3 +-- arch/x86/kernel/smpboot.c | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 4d810b0384a5..bf29dcc37de7 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -69,9 +69,8 @@ unsigned int boot_cpu_physical_apicid = -1U; /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP unsigned int num_processors; -#endif - unsigned disabled_cpus __cpuinitdata; +#endif /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e1288c2626f8..d3402e2c57eb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -88,6 +88,7 @@ EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); /* Internal processor count */ unsigned int num_processors; +unsigned disabled_cpus __cpuinitdata; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -- cgit v1.2.1 From 059c9640b57cb8e70c60de141ec817b450431816 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 28 Mar 2008 11:57:55 +0100 Subject: x86: mpparse, move disabled cpus to smpboot.c, fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index bf29dcc37de7..0f877a572cf0 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -196,7 +196,9 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) int apicid; if (!(m->mpc_cpuflag & CPU_ENABLED)) { +#ifdef CONFIG_X86_SMP disabled_cpus++; +#endif return; } @@ -901,7 +903,9 @@ void __cpuinit mp_register_lapic (u8 id, u8 enabled) } if (!enabled) { +#ifdef CONFIG_X86_SMP ++disabled_cpus; +#endif return; } -- cgit v1.2.1 From 0c254e38d294d3720588e2a1fd954d828073f1dc Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:04 +0300 Subject: x86: move def_to_bigsmp to setup_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 2 -- arch/x86/kernel/setup_32.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 0f877a572cf0..e5376dc5d0cc 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -61,8 +61,6 @@ int nr_ioapics; int pic_mode; -unsigned int def_to_bigsmp = 0; - /* Processor that is doing 
the boot up */ unsigned int boot_cpu_physical_apicid = -1U; diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index 58f3c1fbc5c3..4b198d9d0de3 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c @@ -155,6 +155,8 @@ struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; EXPORT_SYMBOL(boot_cpu_data); +unsigned int def_to_bigsmp; + #ifndef CONFIG_X86_PAE unsigned long mmu_cr4_features; #else -- cgit v1.2.1 From 837e0e7a7f574220c87c552cca9f425575418621 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:10 +0300 Subject: x86: move boot_cpu_physical_apicid to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 3 +++ arch/x86/kernel/mpparse_32.c | 4 +--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 68ea75fa6d35..f0abd59a2a3c 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -52,6 +52,9 @@ unsigned long mp_lapic_addr; +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; + /* * Knob to control our willingness to enable the local APIC. * diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index e5376dc5d0cc..9143810bb637 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -61,13 +61,11 @@ int nr_ioapics; int pic_mode; -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; - /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; +unsigned int boot_cpu_physical_apicid = -1U; #endif /* Make it easy to share the UP and SMP code: */ -- cgit v1.2.1 From fae9811b775655a02dcb51fa0b6423b546468bd1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 28 Mar 2008 12:22:10 +0100 Subject: x86: mpparse, move boot cpu physical apicid to apic_32.c, fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 9143810bb637..9b61b50a96c9 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -65,8 +65,10 @@ int pic_mode; #ifndef CONFIG_X86_SMP unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; +#ifndef CONFIG_X86_LOCAL_APIC unsigned int boot_cpu_physical_apicid = -1U; #endif +#endif /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP -- cgit v1.2.1 From acff5a768935f7f39e4e3be03940d70c005ffe96 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:16 +0300 Subject: x86: move x86_bios_cpu_apicid to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 3 +++ arch/x86/kernel/mpparse_32.c | 4 ---- arch/x86/kernel/smpboot.c | 2 -- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index f0abd59a2a3c..65036bbaf058 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -55,6 +55,9 @@ unsigned long mp_lapic_addr; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; +DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + /* * Knob to control our willingness to enable the local APIC. 
* diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 9b61b50a96c9..c79d6e06c3fa 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -75,10 +75,6 @@ unsigned int boot_cpu_physical_apicid = -1U; physid_mask_t phys_cpu_present_map; #endif -#ifndef CONFIG_SMP -DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; -#endif - /* * Intel MP BIOS table parsing routines: */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index d3402e2c57eb..7bcee1584b50 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -83,8 +83,6 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; -DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); /* Internal processor count */ unsigned int num_processors; -- cgit v1.2.1 From 903dcb5a1bd0ef2b09d756f646e367cd12659b6f Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:22 +0300 Subject: x86: move generic_processor_info to apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 82 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/mpparse_32.c | 81 ------------------------------------------- include/asm-x86/mpspec.h | 1 + 3 files changed, 83 insertions(+), 81 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 65036bbaf058..a99398f71234 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1469,6 +1469,88 @@ void disconnect_bsp_APIC(int virt_wire_setup) } } +unsigned int __cpuinitdata maxcpus = NR_CPUS; + +void __cpuinit generic_processor_info(int apicid, int version) +{ + int cpu; + cpumask_t tmp_map; + physid_mask_t phys_cpu; + + /* + * Validate version + */ + if (version == 0x0) { + printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " + "fixing up to 0x10. (tell your hw vendor)\n", + version); + version = 0x10; + } + apic_version[apicid] = version; + + phys_cpu = apicid_to_cpu_present(apicid); + physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); + + if (num_processors >= NR_CPUS) { + printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." + " Processor ignored.\n", NR_CPUS); + return; + } + + if (num_processors >= maxcpus) { + printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." + " Processor ignored.\n", maxcpus); + return; + } + + num_processors++; + cpus_complement(tmp_map, cpu_present_map); + cpu = first_cpu(tmp_map); + + if (apicid == boot_cpu_physical_apicid) + /* + * x86_bios_cpu_apicid is required to have processors listed + * in same order as logical cpu numbers. Hence the first + * entry is BSP, and so on. + */ + cpu = 0; + + /* + * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y + * but we need to work other dependencies like SMP_SUSPEND etc + * before this can be done without some confusion. + * if (CPU_HOTPLUG_ENABLED || num_processors > 8) + * - Ashok Raj + */ + if (num_processors > 8) { + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + if (!APIC_XAPIC(version)) { + def_to_bigsmp = 0; + break; + } + /* If P4 and above fall through */ + case X86_VENDOR_AMD: + def_to_bigsmp = 1; + } + } +#ifdef CONFIG_SMP + /* are we being called early in kernel startup? 
*/ + if (x86_cpu_to_apicid_early_ptr) { + u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; + u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + + cpu_to_apicid[cpu] = apicid; + bios_cpu_apicid[cpu] = apicid; + } else { + per_cpu(x86_cpu_to_apicid, cpu) = apicid; + per_cpu(x86_bios_cpu_apicid, cpu) = apicid; + } +#endif + cpu_set(cpu, cpu_possible_map); + cpu_set(cpu, cpu_present_map); +} + /* * Power management */ diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index c79d6e06c3fa..cd4522b3e90e 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -35,7 +35,6 @@ /* Have we found an MP table */ int smp_found_config; -unsigned int __cpuinitdata maxcpus = NR_CPUS; /* * Various Linux-internal data structures created from the @@ -105,86 +104,6 @@ static int mpc_record; static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata; #endif -static void __cpuinit generic_processor_info(int apicid, int version) -{ - int cpu; - cpumask_t tmp_map; - physid_mask_t phys_cpu; - - /* - * Validate version - */ - if (version == 0x0) { - printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - version); - version = 0x10; - } - apic_version[apicid] = version; - - phys_cpu = apicid_to_cpu_present(apicid); - physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); - - if (num_processors >= NR_CPUS) { - printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." - " Processor ignored.\n", NR_CPUS); - return; - } - - if (num_processors >= maxcpus) { - printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." - " Processor ignored.\n", maxcpus); - return; - } - - num_processors++; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); - - if (apicid == boot_cpu_physical_apicid) - /* - * x86_bios_cpu_apicid is required to have processors listed - * in same order as logical cpu numbers. Hence the first - * entry is BSP, and so on. - */ - cpu = 0; - - /* - * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y - * but we need to work other dependencies like SMP_SUSPEND etc - * before this can be done without some confusion. - * if (CPU_HOTPLUG_ENABLED || num_processors > 8) - * - Ashok Raj - */ - if (num_processors > 8) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(version)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } - } -#ifdef CONFIG_SMP - /* are we being called early in kernel startup? 
*/ - if (x86_cpu_to_apicid_early_ptr) { - u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; - u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; - - cpu_to_apicid[cpu] = apicid; - bios_cpu_apicid[cpu] = apicid; - } else { - per_cpu(x86_cpu_to_apicid, cpu) = apicid; - per_cpu(x86_bios_cpu_apicid, cpu) = apicid; - } -#endif - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); -} - static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { int apicid; diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index eccbc581ec84..7ee54d396d60 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -43,6 +43,7 @@ extern unsigned long mp_lapic_addr; extern void find_smp_config(void); extern void get_smp_config(void); +void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI extern void mp_register_lapic(u8 id, u8 enabled); extern void mp_register_lapic_address(u64 address); -- cgit v1.2.1 From 86c9835b46605fb29a3c30c6cc344d9df49e54a3 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 28 Mar 2008 11:59:57 +0100 Subject: x86: mpparse, move generic processor info to apic_32.c fix Signed-off-by: Ingo Molnar --- include/asm-x86/mpspec.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 7ee54d396d60..31bac12a97d1 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -1,6 +1,8 @@ #ifndef _AM_X86_MPSPEC_H #define _AM_X86_MPSPEC_H +#include + #include #ifdef CONFIG_X86_32 -- cgit v1.2.1 From 8ccab29ca8c441ae00d878d2f0000275f430f8a5 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:28 +0300 Subject: x86: don't call MP_processor_info for disabled cpu (64bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 83a36eed081b..3681b9d8f557 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -678,6 +678,10 @@ void __cpuinit mp_register_lapic(u8 id, u8 enabled) struct mpc_config_processor processor; int boot_cpu = 0; + if (!enabled) { + ++disabled_cpus; + return; + } if (id == boot_cpu_physical_apicid) boot_cpu = 1; -- cgit v1.2.1 From 0e01c00c1fadd21356a6cf57d6680497256e1a01 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:34 +0300 Subject: x86: separate generic_processor_info into its own function (64bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 3681b9d8f557..7f8ece4190e6 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -93,22 +93,10 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } -static void __cpuinit MP_processor_info(struct mpc_config_processor *m) +void __cpuinit generic_processor_info(int apicid, int version) { int cpu; cpumask_t tmp_map; - char *bootup_cpu = ""; - - if (!(m->mpc_cpuflag & CPU_ENABLED)) { - disabled_cpus++; - return; - } - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - bootup_cpu = " (Bootup-CPU)"; - boot_cpu_physical_apicid = m->mpc_apicid; - } - - printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); if (num_processors >= NR_CPUS) { printk(KERN_WARNING "WARNING: NR_CPUS limit of 
%i reached." @@ -126,8 +114,8 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) cpus_complement(tmp_map, cpu_present_map); cpu = first_cpu(tmp_map); - physid_set(m->mpc_apicid, phys_cpu_present_map); - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + physid_set(apicid, phys_cpu_present_map); + if (apicid == boot_cpu_physical_apicid) { /* * x86_bios_cpu_apicid is required to have processors listed * in same order as logical cpu numbers. Hence the first @@ -140,17 +128,34 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; - cpu_to_apicid[cpu] = m->mpc_apicid; - bios_cpu_apicid[cpu] = m->mpc_apicid; + cpu_to_apicid[cpu] = apicid; + bios_cpu_apicid[cpu] = apicid; } else { - per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; - per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; + per_cpu(x86_cpu_to_apicid, cpu) = apicid; + per_cpu(x86_bios_cpu_apicid, cpu) = apicid; } cpu_set(cpu, cpu_possible_map); cpu_set(cpu, cpu_present_map); } +static void __cpuinit MP_processor_info(struct mpc_config_processor *m) +{ + char *bootup_cpu = ""; + + if (!(m->mpc_cpuflag & CPU_ENABLED)) { + disabled_cpus++; + return; + } + if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + bootup_cpu = " (Bootup-CPU)"; + boot_cpu_physical_apicid = m->mpc_apicid; + } + + printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); + generic_processor_info(m->mpc_apicid, 0); +} + static void __init MP_bus_info(struct mpc_config_bus *m) { char str[7]; -- cgit v1.2.1 From 468e85b9594ed3000a7076f1caf27aa0cf7799fc Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:41 +0300 Subject: x86: don't use MP_processor_info for ACPI mode (64bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 7f8ece4190e6..4da834e1188e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -680,27 +680,12 @@ void __init mp_register_lapic_address(u64 address) void __cpuinit mp_register_lapic(u8 id, u8 enabled) { - struct mpc_config_processor processor; - int boot_cpu = 0; - if (!enabled) { ++disabled_cpus; return; } - if (id == boot_cpu_physical_apicid) - boot_cpu = 1; - - processor.mpc_type = MP_PROCESSOR; - processor.mpc_apicid = id; - processor.mpc_apicver = 0; - processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); - processor.mpc_cpuflag |= (boot_cpu ? 
CPU_BOOTPROCESSOR : 0); - processor.mpc_cpufeature = 0; - processor.mpc_featureflag = 0; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; - MP_processor_info(&processor); + generic_processor_info(id, 0); } #define MP_ISA_BUS 0 -- cgit v1.2.1 From 3f530709d907d93a4d6881e8190916028181a840 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:47 +0300 Subject: x86: move mp_lapic_addr to apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 2 ++ arch/x86/kernel/mpparse_64.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 7dd6250aaf6c..0794646e68f0 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -85,6 +85,8 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static unsigned long apic_phys; +unsigned long mp_lapic_addr; + /* * Get the LAPIC version */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 4da834e1188e..a91e21ebf1ca 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -55,7 +55,6 @@ struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; int mp_irq_entries; int nr_ioapics; -unsigned long mp_lapic_addr = 0; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; -- cgit v1.2.1 From 7abb3cca33fe220abaf680afcd247370749622ee Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 8 Apr 2008 12:20:07 +0200 Subject: x86: move phys cpu present map to smpboot.c, 64-bit, prepare Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index a91e21ebf1ca..f044e98800ef 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -68,9 +68,11 @@ unsigned disabled_cpus __cpuinitdata; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE; +#ifdef CONFIG_SMP u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; +#endif DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); -- cgit v1.2.1 From 1d8554326533568c7e9d5285600c3d0c027b45cc Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:53 +0300 Subject: x86: move phys_cpu_present_map to smpboot.c (64bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 3 --- arch/x86/kernel/smpboot.c | 6 +++--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index f044e98800ef..8e8e38f2b182 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -65,9 +65,6 @@ unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; -/* Bitmask of physically existing CPUs */ -physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE; - #ifdef CONFIG_SMP u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = {[0 ... 
NR_CPUS - 1] = BAD_APICID }; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7bcee1584b50..eee7768de2ae 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -88,12 +88,12 @@ void *x86_bios_cpu_apicid_early_ptr; unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; -/* Bitmask of physically existing CPUs */ -physid_mask_t phys_cpu_present_map; - u8 apicid_2_node[MAX_APICID]; #endif +/* Bitmask of physically existing CPUs */ +physid_mask_t phys_cpu_present_map; + /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -- cgit v1.2.1 From 7b8cbd2c2f1bf9e3090d3c3fc09330ed1ca28d25 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:55:59 +0300 Subject: x86: move num_processors to smpboot.c (64 bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 3 --- arch/x86/kernel/smpboot.c | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 8e8e38f2b182..71f098414d0b 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -60,9 +60,6 @@ int nr_ioapics; unsigned int boot_cpu_physical_apicid = -1U; EXPORT_SYMBOL(boot_cpu_physical_apicid); -/* Internal processor count */ -unsigned int num_processors; - unsigned disabled_cpus __cpuinitdata; #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index eee7768de2ae..fd0bdd36f4ea 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -84,13 +84,14 @@ u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; -/* Internal processor count */ -unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; u8 apicid_2_node[MAX_APICID]; #endif +/* Internal processor count */ +unsigned int num_processors; + /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -- cgit v1.2.1 From 3103623eed1a3ea4a36ee26725842a8038760648 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:56:06 +0300 Subject: x86: move disabled_cpus to smpboot.c (64bit) Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 2 -- arch/x86/kernel/smpboot.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 71f098414d0b..a1d8d4432988 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -60,8 +60,6 @@ int nr_ioapics; unsigned int boot_cpu_physical_apicid = -1U; EXPORT_SYMBOL(boot_cpu_physical_apicid); -unsigned disabled_cpus __cpuinitdata; - #ifdef CONFIG_SMP u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = BAD_APICID }; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index fd0bdd36f4ea..f45d740b1b6a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -84,8 +84,6 @@ u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; -unsigned disabled_cpus __cpuinitdata; - u8 apicid_2_node[MAX_APICID]; #endif @@ -98,6 +96,8 @@ physid_mask_t phys_cpu_present_map; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; +unsigned disabled_cpus __cpuinitdata; + /* Store all idle threads, this can be reused instead of creating * a new thread. Also avoids complicated thread destroy functionality * for idle threads. 
-- cgit v1.2.1 From 86cc0d916a9cc55b0b46a9b31e9379cd3c9e10dc Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:56:12 +0300 Subject: x86: move boot_cpu_physical_apicid to apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 4 ++++ arch/x86/kernel/mpparse_64.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 0794646e68f0..d7d3594f93e3 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -87,6 +87,10 @@ static unsigned long apic_phys; unsigned long mp_lapic_addr; +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL(boot_cpu_physical_apicid); + /* * Get the LAPIC version */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index a1d8d4432988..49e3bfe01022 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -56,10 +56,6 @@ int mp_irq_entries; int nr_ioapics; -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; -EXPORT_SYMBOL(boot_cpu_physical_apicid); - #ifdef CONFIG_SMP u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = BAD_APICID }; -- cgit v1.2.1 From be8a5685e4cdb904e6542e741fcc3bae1becb8ee Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 27 Mar 2008 23:56:19 +0300 Subject: x86: move generic_processor_info to apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 47 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/mpparse_64.c | 49 +------------------------------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index d7d3594f93e3..4ee521ff0a3e 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -91,6 +91,7 @@ unsigned long mp_lapic_addr; unsigned int boot_cpu_physical_apicid = -1U; EXPORT_SYMBOL(boot_cpu_physical_apicid); +unsigned int __cpuinitdata maxcpus = NR_CPUS; /* * Get the LAPIC version */ @@ -1057,6 +1058,52 @@ void disconnect_bsp_APIC(int virt_wire_setup) apic_write(APIC_LVT1, value); } +void __cpuinit generic_processor_info(int apicid, int version) +{ + int cpu; + cpumask_t tmp_map; + + if (num_processors >= NR_CPUS) { + printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." + " Processor ignored.\n", NR_CPUS); + return; + } + + if (num_processors >= maxcpus) { + printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." + " Processor ignored.\n", maxcpus); + return; + } + + num_processors++; + cpus_complement(tmp_map, cpu_present_map); + cpu = first_cpu(tmp_map); + + physid_set(apicid, phys_cpu_present_map); + if (apicid == boot_cpu_physical_apicid) { + /* + * x86_bios_cpu_apicid is required to have processors listed + * in same order as logical cpu numbers. Hence the first + * entry is BSP, and so on. + */ + cpu = 0; + } + /* are we being called early in kernel startup? 
*/ + if (x86_cpu_to_apicid_early_ptr) { + u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; + u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; + + cpu_to_apicid[cpu] = apicid; + bios_cpu_apicid[cpu] = apicid; + } else { + per_cpu(x86_cpu_to_apicid, cpu) = apicid; + per_cpu(x86_bios_cpu_apicid, cpu) = apicid; + } + + cpu_set(cpu, cpu_possible_map); + cpu_set(cpu, cpu_present_map); +} + /* * Power management */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 49e3bfe01022..d62294003036 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -35,7 +35,6 @@ /* Have we found an MP table */ int smp_found_config; -unsigned int __cpuinitdata maxcpus = NR_CPUS; /* * Various Linux-internal data structures created from the @@ -82,52 +81,6 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } -void __cpuinit generic_processor_info(int apicid, int version) -{ - int cpu; - cpumask_t tmp_map; - - if (num_processors >= NR_CPUS) { - printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." - " Processor ignored.\n", NR_CPUS); - return; - } - - if (num_processors >= maxcpus) { - printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." - " Processor ignored.\n", maxcpus); - return; - } - - num_processors++; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); - - physid_set(apicid, phys_cpu_present_map); - if (apicid == boot_cpu_physical_apicid) { - /* - * x86_bios_cpu_apicid is required to have processors listed - * in same order as logical cpu numbers. Hence the first - * entry is BSP, and so on. - */ - cpu = 0; - } - /* are we being called early in kernel startup? */ - if (x86_cpu_to_apicid_early_ptr) { - u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; - u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; - - cpu_to_apicid[cpu] = apicid; - bios_cpu_apicid[cpu] = apicid; - } else { - per_cpu(x86_cpu_to_apicid, cpu) = apicid; - per_cpu(x86_bios_cpu_apicid, cpu) = apicid; - } - - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); -} - static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { char *bootup_cpu = ""; @@ -666,7 +619,6 @@ void __init mp_register_lapic_address(u64 address) if (boot_cpu_physical_apicid == -1U) boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); } - void __cpuinit mp_register_lapic(u8 id, u8 enabled) { if (!enabled) { @@ -677,6 +629,7 @@ void __cpuinit mp_register_lapic(u8 id, u8 enabled) generic_processor_info(id, 0); } + #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 -- cgit v1.2.1 From fe176de0ffdc2dd300fbcece84434a32b482b5b1 Mon Sep 17 00:00:00 2001 From: Ben Castricum Date: Thu, 27 Mar 2008 20:52:35 +0100 Subject: x86: microcode: show results on success too Report when microcode was successfully updated. It used to be there but now with DEBUG unset it becomes very silent. Also some cosmetic fixes. 
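The reason it became very silent is that pr_debug() compiles away entirely unless DEBUG is defined; schematically (a simplified sketch of the kernel.h definition, not the exact text):

#ifdef DEBUG
# define pr_debug(fmt, arg...)	printk(KERN_DEBUG fmt, ##arg)
#else
# define pr_debug(fmt, arg...)	do { } while (0)
#endif

So a success message issued via pr_debug() is compiled out of production kernels, while printk(KERN_INFO ...) stays visible regardless of DEBUG.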
Signed-off-by: Ben Castricum Signed-off-by: Ingo Molnar --- arch/x86/kernel/microcode.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index f2702d01b8a8..25cf6dee4e56 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -290,7 +290,7 @@ static int get_maching_microcode(void *mc, int cpu) } return 0; find: - pr_debug("microcode: CPU %d found a matching microcode update with" + pr_debug("microcode: CPU%d found a matching microcode update with" " version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev); new_mc = vmalloc(total_size); if (!new_mc) { @@ -336,11 +336,11 @@ static void apply_microcode(int cpu) spin_unlock_irqrestore(µcode_update_lock, flags); if (val[1] != uci->mc->hdr.rev) { - printk(KERN_ERR "microcode: CPU%d updated from revision " + printk(KERN_ERR "microcode: CPU%d update from revision " "0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]); return; } - pr_debug("microcode: CPU%d updated from revision " + printk(KERN_INFO "microcode: CPU%d updated from revision " "0x%x to 0x%x, date = %08x \n", cpu_num, uci->rev, val[1], uci->mc->hdr.date); uci->rev = val[1]; @@ -534,7 +534,7 @@ static int cpu_request_microcode(int cpu) c->x86, c->x86_model, c->x86_mask); error = request_firmware(&firmware, name, µcode_pdev->dev); if (error) { - pr_debug("ucode data file %s load failed\n", name); + pr_debug("microcode: ucode data file %s load failed\n", name); return error; } buf = firmware->data; @@ -709,7 +709,7 @@ static int __mc_sysdev_add(struct sys_device *sys_dev, int resume) if (!cpu_online(cpu)) return 0; - pr_debug("Microcode:CPU %d added\n", cpu); + pr_debug("microcode: CPU%d added\n", cpu); memset(uci, 0, sizeof(*uci)); err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); @@ -733,7 +733,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev) if (!cpu_online(cpu)) return 0; - pr_debug("Microcode:CPU %d removed\n", cpu); + pr_debug("microcode: CPU%d removed\n", cpu); microcode_fini_cpu(cpu); sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); return 0; @@ -745,7 +745,7 @@ static int mc_sysdev_resume(struct sys_device *dev) if (!cpu_online(cpu)) return 0; - pr_debug("Microcode:CPU %d resumed\n", cpu); + pr_debug("microcode: CPU%d resumed\n", cpu); /* only CPU 0 will apply ucode here */ apply_microcode(0); return 0; @@ -783,7 +783,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) } case CPU_DOWN_FAILED_FROZEN: if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) - printk(KERN_ERR "Microcode: Failed to create the sysfs " + printk(KERN_ERR "microcode: Failed to create the sysfs " "group for CPU%d\n", cpu); break; case CPU_DOWN_PREPARE: -- cgit v1.2.1 From fe874b3edff43f9a74d9903eb3710e5e0511faf1 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:56 -0300 Subject: x86: surround hard_smp_processor_id in APIC_DEFINITION APIC_DEFINITION is not defined in x86_64, so in practice, we keep our old code here. But as a nice side effect, the code is now equal to smp_32.h. 
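Concretely, since x86_64 never defines APIC_DEFINITION, the preprocessor always takes the #else branch there, and a 64-bit build keeps compiling exactly the old inline; a sketch of the effective expansion:

/* what the compiler effectively sees on x86_64 */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
}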
Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp_64.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index c53a011bb91c..25206333476b 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -52,12 +52,16 @@ static inline int logical_smp_processor_id(void)
 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }

-#include
+# ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+# else
+# include
 static inline int hard_smp_processor_id(void)
 {
 /* we don't want to mark this access volatile - bad code generation */
 return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
 }
+# endif /* APIC_DEFINITION */

 #endif
-- cgit v1.2.1


From 2ba95bcbe68d692f549fb10809f15681a25ff6fb Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Thu, 27 Mar 2008 14:05:57 -0300
Subject: x86: provide bogus hard_smp_processor_id

We provide a bogus macro for x86_64 in case CONFIG_X86_LOCAL_APIC is
not set. It will always be set for x86_64, so the effect is just to
make the code equal to i386.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp_64.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 25206333476b..c46585e09ea1 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -46,6 +46,8 @@ static inline int num_booting_cpus(void)

 #define safe_smp_processor_id() smp_processor_id()

+#ifdef CONFIG_X86_LOCAL_APIC
+
 static inline int logical_smp_processor_id(void)
 {
 /* we don't want to mark this access volatile - bad code generation */
@@ -63,5 +65,13 @@ static inline int hard_smp_processor_id(void)
 }
 # endif /* APIC_DEFINITION */

+#else /* CONFIG_X86_LOCAL_APIC */
+
+# ifndef CONFIG_SMP
+# define hard_smp_processor_id() 0
+# endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
 #endif
-- cgit v1.2.1


From 1b00084386878f25c2c591ad19cb625880d4089d Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Thu, 27 Mar 2008 14:05:58 -0300
Subject: x86: merge hard/logical_smp_processor_id

The code is now the same between i386 and x86_64, and we already know
what happens when that is the case: the definitions go away from the
arch-specific headers and appear in the common header.
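Once merged, callers need not care which architecture they are built for. As a hypothetical illustration (check_apic_id() is a made-up name; hard_smp_processor_id(), cpu_physical_id() and smp_processor_id() are the real interfaces), the same debug check now compiles identically on i386 and x86_64:

/* assumes it is called with preemption disabled */
static void check_apic_id(void)
{
	int hw = hard_smp_processor_id();
	int sw = cpu_physical_id(smp_processor_id());

	if (hw != sw)
		printk(KERN_WARNING "APIC ID mismatch: hw %d vs table %d\n",
		       hw, sw);
}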
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 27 +++++++++++++++++++++++++++ include/asm-x86/smp_32.h | 27 --------------------------- include/asm-x86/smp_64.h | 27 --------------------------- 3 files changed, 27 insertions(+), 54 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index ef26911dc22a..e5534f19c312 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -123,6 +123,33 @@ void smp_store_cpu_info(int id); # include "smp_64.h" #endif +#ifdef CONFIG_X86_LOCAL_APIC + +static inline int logical_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); +} + +# ifdef APIC_DEFINITION +extern int hard_smp_processor_id(void); +# else +# include +static inline int hard_smp_processor_id(void) +{ + /* we don't want to mark this access volatile - bad code generation */ + return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); +} +# endif /* APIC_DEFINITION */ + +#else /* CONFIG_X86_LOCAL_APIC */ + +# ifndef CONFIG_SMP +# define hard_smp_processor_id() 0 +# endif + +#endif /* CONFIG_X86_LOCAL_APIC */ + #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index cb3ada2fedbf..53432dbd5426 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -42,32 +42,5 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -#ifdef CONFIG_X86_LOCAL_APIC - -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -} - -# ifdef APIC_DEFINITION -extern int hard_smp_processor_id(void); -# else -# include -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); -} -# endif /* APIC_DEFINITION */ - -#else /* CONFIG_X86_LOCAL_APIC */ - -# ifndef CONFIG_SMP -# define hard_smp_processor_id() 0 -# endif - -#endif /* CONFIG_X86_LOCAL_APIC */ - #endif /* !ASSEMBLY */ #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index c46585e09ea1..015d36e29ade 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -46,32 +46,5 @@ static inline int num_booting_cpus(void) #define safe_smp_processor_id() smp_processor_id() -#ifdef CONFIG_X86_LOCAL_APIC - -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); -} - -# ifdef APIC_DEFINITION -extern int hard_smp_processor_id(void); -# else -# include -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); -} -# endif /* APIC_DEFINITION */ - -#else /* CONFIG_X86_LOCAL_APIC */ - -# ifndef CONFIG_SMP -# define hard_smp_processor_id() 0 -# endif - -#endif /* CONFIG_X86_LOCAL_APIC */ - #endif -- cgit v1.2.1 From c1fa6c977eb978e1d09867475ec59c9a5799127f Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:05:59 -0300 Subject: x86: surround apic headers in apic definitions Although those constants are always defined in x86_64, and will have the effect of just including the headers in the very way we did before, I'm 
doing this in a separate patch to be conservative and avoid surprises. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 015d36e29ade..b83151d7388c 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -7,9 +7,13 @@ /* * We need the APIC definitions automatically as part of 'smp.h' */ -#include -#include -#include +#ifdef CONFIG_X86_LOCAL_APIC +# include +# include +# ifdef CONFIG_X86_IO_APIC +# include +# endif +#endif #include #include -- cgit v1.2.1 From b23dab08fa37b302a8980e4cf925f2cb94288538 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:00 -0300 Subject: x86: merge includes in smp.h move all include directives from smp_{32,64}.h to smp.h. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 13 +++++++++++++ include/asm-x86/smp_32.h | 13 ------------- include/asm-x86/smp_64.h | 16 ---------------- 3 files changed, 13 insertions(+), 29 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index e5534f19c312..21472cea3d6c 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -5,6 +5,19 @@ #include #include +/* + * We need the APIC definitions automatically as part of 'smp.h' + */ +#ifdef CONFIG_X86_LOCAL_APIC +# include +# include +# ifdef CONFIG_X86_IO_APIC +# include +# endif +#endif +#include +#include + extern cpumask_t cpu_callout_map; extern int smp_num_siblings; diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 53432dbd5426..694d3245a88f 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -2,19 +2,6 @@ #define __ASM_SMP_H #ifndef __ASSEMBLY__ -#include -#include - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifdef CONFIG_X86_LOCAL_APIC -# include -# include -# ifdef CONFIG_X86_IO_APIC -# include -# endif -#endif extern cpumask_t cpu_callin_map; diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index b83151d7388c..eead92e30b29 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -1,22 +1,6 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H -#include -#include - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifdef CONFIG_X86_LOCAL_APIC -# include -# include -# ifdef CONFIG_X86_IO_APIC -# include -# endif -#endif -#include -#include - extern cpumask_t cpu_initialized; extern cpumask_t cpu_callin_map; -- cgit v1.2.1 From 24e8ecffa84dd560e0d4d6fcaeca6950805854e7 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:01 -0300 Subject: x86: split safe_smp_processor_id This implementation in x86_64 is clean and consistent, but we sacrifice it for the sake of being equal to i386 (since the other way around would be harder). 
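The reason i386 cannot simply take the x86_64 definition is that its safe_smp_processor_id() must keep working when per-cpu state may be unreliable, for example in crash or NMI context, so it remains a real function there. A sketch of the idea behind such an implementation (illustration only, not the kernel's actual code; safe_cpu_id_sketch() is a made-up name):

static int safe_cpu_id_sketch(void)
{
	/* recover the logical CPU from the hardware APIC ID instead of
	 * trusting possibly-corrupt per-cpu state */
	int apicid = hard_smp_processor_id();
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_physical_id(cpu) == apicid)
			return cpu;

	return 0;	/* fall back to the boot CPU */
}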
Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index eead92e30b29..8ea49529f324 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -27,12 +27,12 @@ static inline int num_booting_cpus(void) return cpus_weight(cpu_callout_map); } +#define safe_smp_processor_id() smp_processor_id() #else /* CONFIG_SMP */ #define stack_smp_processor_id() 0 - +#define safe_smp_processor_id() 0 #endif /* !CONFIG_SMP */ -#define safe_smp_processor_id() smp_processor_id() #endif -- cgit v1.2.1 From a9c057c1d1b1080a01004ecac54308365e167b83 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:02 -0300 Subject: x86: merge SMP definitions of smp.h we merge everything that is inside CONFIG_SMP to smp.h. They differ a little bit, so we use CONFIG_X86_32_SMP and CONFIG_X86_64_SMP as markers. Signed-off-by: Glauber Costa Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 33 ++++++++++++++++++++++++++++++++- include/asm-x86/smp_32.h | 21 --------------------- include/asm-x86/smp_64.h | 27 --------------------------- 3 files changed, 32 insertions(+), 49 deletions(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 21472cea3d6c..57b3d86dd9ed 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -126,8 +126,39 @@ extern unsigned long setup_trampoline(void); void smp_store_cpu_info(int id); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) -#else + +/* We don't mark CPUs online until __cpu_up(), so we need another measure */ +static inline int num_booting_cpus(void) +{ + return cpus_weight(cpu_callout_map); +} +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_X86_32_SMP +/* + * This function is needed by all SMP systems. It must _always_ be valid + * from the initial startup. We map APIC_BASE very early in page_setup(), + * so this is correct in the x86 case. + */ +DECLARE_PER_CPU(int, cpu_number); +#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) +extern int safe_smp_processor_id(void); + +#elif defined(CONFIG_X86_64_SMP) +#define raw_smp_processor_id() read_pda(cpunumber) + +#define stack_smp_processor_id() \ +({ \ + struct thread_info *ti; \ + __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ + ti->cpu; \ +}) +#define safe_smp_processor_id() smp_processor_id() + +#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid +#define safe_smp_processor_id() 0 +#define stack_smp_processor_id() 0 #endif #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 694d3245a88f..d9ae5ac93dfc 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h @@ -8,26 +8,5 @@ extern cpumask_t cpu_callin_map; extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); -#ifdef CONFIG_SMP -/* - * This function is needed by all SMP systems. It must _always_ be valid - * from the initial startup. We map APIC_BASE very early in page_setup(), - * so this is correct in the x86 case. 
- */ -DECLARE_PER_CPU(int, cpu_number); -#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) - -extern int safe_smp_processor_id(void); - -/* We don't mark CPUs online until __cpu_up(), so we need another measure */ -static inline int num_booting_cpus(void) -{ - return cpus_weight(cpu_callout_map); -} - -#else /* CONFIG_SMP */ -#define safe_smp_processor_id() 0 -#endif /* !CONFIG_SMP */ - #endif /* !ASSEMBLY */ #endif diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index 8ea49529f324..058f41399798 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h @@ -7,32 +7,5 @@ extern cpumask_t cpu_callin_map; extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, int wait); -#ifdef CONFIG_SMP - -#define raw_smp_processor_id() read_pda(cpunumber) - -#define stack_smp_processor_id() \ -({ \ - struct thread_info *ti; \ - asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ - ti->cpu; \ -}) - -/* - * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies - * scheduling and IPI sending and compresses data structures. - */ -static inline int num_booting_cpus(void) -{ - return cpus_weight(cpu_callout_map); -} - -#define safe_smp_processor_id() smp_processor_id() -#else /* CONFIG_SMP */ -#define stack_smp_processor_id() 0 -#define safe_smp_processor_id() 0 -#endif /* !CONFIG_SMP */ - - #endif -- cgit v1.2.1 From ecaa6c9de759259c5ba517e5442e26452d49107e Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:03 -0300 Subject: x86: change naming of cpu_initialized_mask for xen xen does not use the global cpu_initialized mask, but rather, a specific one. So we change its name so it won't conflict with the upcoming movement of cpu_initialized_mask from smp_64.h to smp_32.h. Signed-off-by: Glauber Costa CC: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/xen/smp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index aafc54437403..e340ff92f6b6 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -35,7 +35,7 @@ #include "xen-ops.h" #include "mmu.h" -static cpumask_t cpu_initialized_map; +static cpumask_t xen_cpu_initialized_map; static DEFINE_PER_CPU(int, resched_irq); static DEFINE_PER_CPU(int, callfunc_irq); @@ -179,7 +179,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus) if (xen_smp_intr_init(0)) BUG(); - cpu_initialized_map = cpumask_of_cpu(0); + xen_cpu_initialized_map = cpumask_of_cpu(0); /* Restrict the possible_map according to max_cpus. */ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { @@ -210,7 +210,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) struct vcpu_guest_context *ctxt; struct gdt_page *gdt = &per_cpu(gdt_page, cpu); - if (cpu_test_and_set(cpu, cpu_initialized_map)) + if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) return 0; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); -- cgit v1.2.1 From 8be9ac850564a409c1238cd5f53776c340aea4dc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Thu, 27 Mar 2008 14:06:04 -0300 Subject: x86: merge smp_32.h and smp_64.h into smp.h Merge what's left from smp_32.h and smp_64.h into smp.h By now, they're basically extern definitions. 
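
(One of the definitions consolidated just above deserves a note:
stack_smp_processor_id() recovers the CPU number from nothing but the stack
pointer. Below is a C-level sketch of the same idea; it assumes THREAD_SIZE
is a power of two and that CURRENT_MASK clears the low THREAD_SIZE bits,
which is what the inline asm's andq achieves. The helper name is ours, not
the kernel's.)

        /*
         * Sketch only, not from the patch: thread_info sits at the base of
         * the kernel stack, so rounding the stack pointer down to a
         * THREAD_SIZE boundary locates it, and ti->cpu is the CPU number.
         */
        static inline int stack_cpu_number(unsigned long sp)
        {
                struct thread_info *ti;

                ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
                return ti->cpu;
        }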
Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp.h    | 11 +++++------
 include/asm-x86/smp_32.h | 12 ------------
 include/asm-x86/smp_64.h | 11 -----------
 3 files changed, 5 insertions(+), 29 deletions(-)
 delete mode 100644 include/asm-x86/smp_32.h
 delete mode 100644 include/asm-x86/smp_64.h

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 57b3d86dd9ed..bcbd25cbd863 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -19,6 +19,11 @@
 #include
 
 extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_callin_map;
+
+extern void (*mtrr_hook)(void);
+extern void zap_low_mappings(void);
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
@@ -161,12 +166,6 @@ extern int safe_smp_processor_id(void);
 #define stack_smp_processor_id() 0
 #endif
 
-#ifdef CONFIG_X86_32
-# include "smp_32.h"
-#else
-# include "smp_64.h"
-#endif
-
 #ifdef CONFIG_X86_LOCAL_APIC
 
 static inline int logical_smp_processor_id(void)
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
deleted file mode 100644
index d9ae5ac93dfc..000000000000
--- a/include/asm-x86/smp_32.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-#ifndef __ASSEMBLY__
-
-extern cpumask_t cpu_callin_map;
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-#endif /* !ASSEMBLY */
-#endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
deleted file mode 100644
index 058f41399798..000000000000
--- a/include/asm-x86/smp_64.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_callin_map;
-
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-	void *info, int wait);
-
-#endif
-
-- cgit v1.2.1

From e937fcf2fa0c1d21f9c0008ab600d46c240a984c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 28 Mar 2008 12:33:52 +0100
Subject: x86: mpparse: 64-bit fix

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/mpparse_64.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index d62294003036..5e789bdb34fa 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -63,6 +63,20 @@ void *x86_bios_cpu_apicid_early_ptr;
 DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
 EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
+/* Make it easy to share the UP and SMP code: */
+#ifndef CONFIG_X86_SMP
+unsigned int num_processors;
+unsigned disabled_cpus __cpuinitdata;
+#ifndef CONFIG_X86_LOCAL_APIC
+unsigned int boot_cpu_physical_apicid = -1U;
+#endif
+#endif
+
+/* Make it easy to share the UP and SMP code: */
+#ifndef CONFIG_X86_SMP
+physid_mask_t phys_cpu_present_map;
+#endif
+
 /*
 * Intel MP BIOS table parsing routines:
 */
-- cgit v1.2.1

From fb8e8375394e1156a5a1e7ba53504b141d2365a8 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Thu, 27 Mar 2008 17:28:39 -0700
Subject: x86: sparsemem: reduce i386 PAE section size

A 1G section size makes memory hotplug too coarse in a virtual
environment. Reduce it by a factor of 2, to 512M. I would have liked to
make it smaller, but that runs out of reserved bits in the page flags.
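
To see why 512M is the floor, here is the back-of-envelope arithmetic using
the PAE limits visible in the hunk below (a sketch, not code from the patch;
the enum names are ours):

        /*
         * sparsemem needs 1 << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
         * sections, and the section index lives in page->flags:
         *
         *   SECTION_SIZE_BITS = 30:  1G sections,    64 sections, 6 index bits
         *   SECTION_SIZE_BITS = 29:  512M sections, 128 sections, 7 index bits
         *
         * Dropping below 29 would cost yet another page-flags bit.
         */
        enum {
                PAE_SECTIONS_OLD = 1 << (36 - 30),      /* 64 */
                PAE_SECTIONS_NEW = 1 << (36 - 29),      /* 128 */
        };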
Signed-off-by: Jeremy Fitzhardinge Cc: KAMEZAWA Hiroyuki Cc: Yasunori Goto Cc: Christoph Lameter Cc: Dave Hansen Signed-off-by: Ingo Molnar --- include/asm-x86/sparsemem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa684f353aa7..9bd48b0a534b 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h @@ -16,7 +16,7 @@ #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_PAE -# define SECTION_SIZE_BITS 30 +# define SECTION_SIZE_BITS 29 # define MAX_PHYSADDR_BITS 36 # define MAX_PHYSMEM_BITS 36 # else -- cgit v1.2.1 From f7743fe676fadac8706e7cbd0176b46d7397996d Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 27 Mar 2008 17:28:40 -0700 Subject: x86: paravirt_ops: don't steal memory resources in paravirt_disable_iospace The memory resource is also used for main memory, and we need it to allocate physical addresses for memory hotplug. Knobbling io space is enough to get the job done anyway. Signed-off-by: Jeremy Fitzhardinge Cc: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 075962cc75ab..3733412d1357 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -206,13 +206,6 @@ static struct resource reserve_ioports = { .flags = IORESOURCE_IO | IORESOURCE_BUSY, }; -static struct resource reserve_iomem = { - .start = 0, - .end = -1, - .name = "paravirt-iomem", - .flags = IORESOURCE_MEM | IORESOURCE_BUSY, -}; - /* * Reserve the whole legacy IO space to prevent any legacy drivers * from wasting time probing for their hardware. This is a fairly @@ -222,16 +215,7 @@ static struct resource reserve_iomem = { */ int paravirt_disable_iospace(void) { - int ret; - - ret = request_resource(&ioport_resource, &reserve_ioports); - if (ret == 0) { - ret = request_resource(&iomem_resource, &reserve_iomem); - if (ret) - release_resource(&reserve_ioports); - } - - return ret; + return request_resource(&ioport_resource, &reserve_ioports); } static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE; -- cgit v1.2.1 From 6093015db2bd9e70cf20cdd23be1a50733baafdd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 30 Mar 2008 11:45:23 +0200 Subject: x86: cleanup replace most vm86 flags with flags from processor-flags.h, fix - fix build error - fix CONFIG_HEADERS_CHECK error Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps_32.c | 2 +- include/asm-x86/Kbuild | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 57a5704e3f6c..3284502a1bf8 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -959,7 +959,7 @@ debug_vm86: clear_TF_reenable: set_tsk_thread_flag(tsk, TIF_SINGLESTEP); - regs->flags &= ~TF_MASK; + regs->flags &= ~X86_EFLAGS_TF; return; } diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index 3b8160a2b47e..1e3554596f72 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild @@ -10,6 +10,7 @@ header-y += prctl.h header-y += ptrace-abi.h header-y += sigcontext32.h header-y += ucontext.h +header-y += processor-flags.h unifdef-y += e820.h unifdef-y += ist.h -- cgit v1.2.1 From 6b6891f9c545ccd45d6d8ddfd33ce27c22c271a7 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Fri, 28 Mar 2008 17:56:57 +0300 Subject: x86: cleanup - rename VM_MASK to X86_VM_MASK This patch renames 
VM_MASK to X86_VM_MASK (which is in turn defined as an alias of
X86_EFLAGS_VM) to better distinguish it from the virtual memory flags.
We can't just use X86_EFLAGS_VM instead, because it is also used for
conditional compilation.

Signed-off-by: Cyrill Gorcunov
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/traps_32.c | 8 ++++----
 arch/x86/kernel/vm86_32.c  | 2 +-
 arch/x86/mm/fault.c        | 2 +-
 include/asm-x86/ptrace.h   | 4 ++--
 include/asm-x86/vm86.h     | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 3284502a1bf8..bb9107c56ff5 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -498,7 +498,7 @@ do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs,
 {
 	struct task_struct *tsk = current;
 
-	if (regs->flags & VM_MASK) {
+	if (regs->flags & X86_VM_MASK) {
 		if (vm86)
 			goto vm86_trap;
 		goto trap_signal;
@@ -643,7 +643,7 @@ void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
 	}
 	put_cpu();
 
-	if (regs->flags & VM_MASK)
+	if (regs->flags & X86_VM_MASK)
 		goto gp_in_vm86;
 
 	if (!user_mode(regs))
@@ -922,7 +922,7 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
 		goto clear_dr7;
 	}
 
-	if (regs->flags & VM_MASK)
+	if (regs->flags & X86_VM_MASK)
 		goto debug_vm86;
 
 	/* Save debug status register where ptrace can see it */
@@ -1094,7 +1094,7 @@ void do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 	 * Handle strange cache flush from user space exception
 	 * in all other cases. This is undocumented behaviour.
 	 */
-	if (regs->flags & VM_MASK) {
+	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
 		return;
 	}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 51040698c222..c866c00f4a85 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -299,7 +299,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	VEFLAGS = info->regs.pt.flags;
 	info->regs.pt.flags &= SAFE_MASK;
 	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
-	info->regs.pt.flags |= VM_MASK;
+	info->regs.pt.flags |= X86_VM_MASK;
 
 	switch (info->cpu_type) {
 	case CPU_286:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 81fcbeec3892..fd7e1798c75a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -639,7 +639,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 #ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled.
*/ - if (regs->flags & (X86_EFLAGS_IF|VM_MASK)) + if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK)) local_irq_enable(); /* diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index e779f2b26b32..24ec061566c5 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h @@ -170,7 +170,7 @@ static inline int user_mode(struct pt_regs *regs) static inline int user_mode_vm(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & VM_MASK)) >= + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else return user_mode(regs); @@ -180,7 +180,7 @@ static inline int user_mode_vm(struct pt_regs *regs) static inline int v8086_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return (regs->flags & VM_MASK); + return (regs->flags & X86_VM_MASK); #else return 0; /* No V86 mode support in long mode */ #endif diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index a2be241ed036..f5f3dc479c34 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -17,9 +17,9 @@ #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #ifdef CONFIG_VM86 -#define VM_MASK 0x00020000 +#define X86_VM_MASK X86_EFLAGS_VM #else -#define VM_MASK 0 /* ignored */ +#define X86_VM_MASK 0 /* No VM86 support */ #endif #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ -- cgit v1.2.1 From a5c15d419d4b68535222b51f9054dd08d5e67470 Mon Sep 17 00:00:00 2001 From: "gorcunov@gmail.com" Date: Fri, 28 Mar 2008 17:56:56 +0300 Subject: x86: replace most VM86 flags with flags from processor-flags.h Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 2 +- arch/x86/kernel/signal_32.c | 4 ++-- arch/x86/kernel/vm86_32.c | 34 +++++++++++++++++----------------- include/asm-x86/vm86.h | 10 ++-------- 4 files changed, 22 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index cc8ae90103ff..b8c6743a13da 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -489,7 +489,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, break; case KPROBE_HIT_SS: if (p == kprobe_running()) { - regs->flags &= ~TF_MASK; + regs->flags &= ~X86_EFLAGS_TF; regs->flags |= kcb->kprobe_saved_flags; return 0; } else { diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index aa1b6a0a22e4..f1b117930837 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -419,7 +419,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, * The tracer may want to single-step inside the * handler too. */ - regs->flags &= ~(TF_MASK | X86_EFLAGS_DF); + regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); @@ -507,7 +507,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * The tracer may want to single-step inside the * handler too. 
*/ - regs->flags &= ~(TF_MASK | X86_EFLAGS_DF); + regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index c866c00f4a85..38f566fa27d2 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -139,7 +139,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } - set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask); + set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs, regs); tmp += put_user(current->thread.screen_bitmap, ¤t->thread.vm86_info->screen_bitmap); if (tmp) { @@ -306,13 +306,13 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk tsk->thread.v86mask = 0; break; case CPU_386: - tsk->thread.v86mask = NT_MASK | IOPL_MASK; + tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: - tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; + tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: - tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; + tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } @@ -363,24 +363,24 @@ static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) static inline void set_IF(struct kernel_vm86_regs *regs) { - VEFLAGS |= VIF_MASK; - if (VEFLAGS & VIP_MASK) + VEFLAGS |= X86_EFLAGS_VIF; + if (VEFLAGS & X86_EFLAGS_VIP) return_to_32bit(regs, VM86_STI); } static inline void clear_IF(struct kernel_vm86_regs *regs) { - VEFLAGS &= ~VIF_MASK; + VEFLAGS &= ~X86_EFLAGS_VIF; } static inline void clear_TF(struct kernel_vm86_regs *regs) { - regs->pt.flags &= ~TF_MASK; + regs->pt.flags &= ~X86_EFLAGS_TF; } static inline void clear_AC(struct kernel_vm86_regs *regs) { - regs->pt.flags &= ~AC_MASK; + regs->pt.flags &= ~X86_EFLAGS_AC; } /* @@ -399,7 +399,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs { set_flags(VEFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); - if (flags & IF_MASK) + if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); @@ -409,7 +409,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg { set_flags(VFLAGS, flags, current->thread.v86mask); set_flags(regs->pt.flags, flags, SAFE_MASK); - if (flags & IF_MASK) + if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); @@ -419,9 +419,9 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) { unsigned long flags = regs->pt.flags & RETURN_MASK; - if (VEFLAGS & VIF_MASK) - flags |= IF_MASK; - flags |= IOPL_MASK; + if (VEFLAGS & X86_EFLAGS_VIF) + flags |= X86_EFLAGS_IF; + flags |= X86_EFLAGS_IOPL; return flags | (VEFLAGS & current->thread.v86mask); } @@ -573,11 +573,11 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) #define CHECK_IF_IN_TRAP \ if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ - newflags |= TF_MASK + newflags |= X86_EFLAGS_TF #define VM86_FAULT_RETURN do { \ - if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \ + if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \ return_to_32bit(regs, VM86_PICRETURN); \ - if (orig_flags & TF_MASK) \ + if (orig_flags & X86_EFLAGS_TF) \ handle_vm86_trap(regs, 0, 1); \ return; } while (0) diff --git 
a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index f5f3dc479c34..074b357146df 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h @@ -12,19 +12,13 @@ * Linus */ -#define TF_MASK 0x00000100 -#define IF_MASK 0x00000200 -#define IOPL_MASK 0x00003000 -#define NT_MASK 0x00004000 +#include + #ifdef CONFIG_VM86 #define X86_VM_MASK X86_EFLAGS_VM #else #define X86_VM_MASK 0 /* No VM86 support */ #endif -#define AC_MASK 0x00040000 -#define VIF_MASK 0x00080000 /* virtual interrupt flag */ -#define VIP_MASK 0x00100000 /* virtual interrupt pending */ -#define ID_MASK 0x00200000 #define BIOSSEG 0x0f000 -- cgit v1.2.1 From 05f2d12c3563dea8c81b301f9f3cf7919af23b13 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:02 -0500 Subject: x86: change GET_APIC_ID() from an inline function to an out-of-line function Introduce a function to read the local APIC_ID. This change is in preparation for additional changes to the APICID functions that will come in a later patch. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 4 ++-- arch/x86/kernel/apic_64.c | 10 +++++----- arch/x86/kernel/genapic_flat_64.c | 2 +- arch/x86/kernel/io_apic_32.c | 6 +++--- arch/x86/kernel/io_apic_64.c | 5 ++--- arch/x86/kernel/mpparse_32.c | 2 +- arch/x86/kernel/mpparse_64.c | 2 +- arch/x86/kernel/smpboot.c | 6 +++--- include/asm-x86/mach-default/mach_apic.h | 2 +- include/asm-x86/mach-es7000/mach_apic.h | 2 +- include/asm-x86/mach-visws/mach_apic.h | 2 +- include/asm-x86/smp.h | 7 ++++++- 12 files changed, 27 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index a99398f71234..4905a11b30e3 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1195,7 +1195,7 @@ void __init init_apic_mappings(void) * default configuration (or the MP table is broken). */ if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); #ifdef CONFIG_X86_IO_APIC { @@ -1265,7 +1265,7 @@ int __init APIC_init_uniprocessor(void) * might be zero if read from MP tables. Get it from LAPIC. */ #ifdef CONFIG_CRASH_DUMP - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); #endif phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 4ee521ff0a3e..9b4cacdfd74f 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -650,10 +650,10 @@ int __init verify_local_APIC(void) /* * The ID register is read/write in a real APIC. */ - reg0 = apic_read(APIC_ID); + reg0 = read_apic_id(); apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); - reg1 = apic_read(APIC_ID); + reg1 = read_apic_id(); apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); apic_write(APIC_ID, reg0); if (reg1 != (reg0 ^ APIC_ID_MASK)) @@ -892,7 +892,7 @@ void __init early_init_lapic_mapping(void) * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). */ - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); } /** @@ -919,7 +919,7 @@ void __init init_apic_mappings(void) * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). 
*/ - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); } /* @@ -1140,7 +1140,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) maxlvt = lapic_get_maxlvt(); - apic_pm_state.apic_id = apic_read(APIC_ID); + apic_pm_state.apic_id = read_apic_id(); apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); apic_pm_state.apic_ldr = apic_read(APIC_LDR); apic_pm_state.apic_dfr = apic_read(APIC_DFR); diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 07352b74bda6..6a44e8dace37 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -97,7 +97,7 @@ static void flat_send_IPI_all(int vector) static int flat_apic_id_registered(void) { - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); + return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); } static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 8ed6eb967652..bfebe7a1966d 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1482,8 +1482,8 @@ void /*__init*/ print_local_APIC(void * dummy) printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", smp_processor_id(), hard_smp_processor_id()); - v = apic_read(APIC_ID); - printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); + printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, + GET_APIC_ID(read_apic_id())); v = apic_read(APIC_LVR); printk(KERN_INFO "... APIC VERSION: %08x\n", v); ver = GET_APIC_VERSION(v); @@ -1692,7 +1692,7 @@ void disable_IO_APIC(void) entry.delivery_mode = dest_ExtINT; /* ExtInt */ entry.vector = 0; entry.dest.physical.physical_dest = - GET_APIC_ID(apic_read(APIC_ID)); + GET_APIC_ID(read_apic_id()); /* * Add it to the IO-APIC irq-routing table: diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 6dd33628f28a..0ac92d6acf57 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -1068,8 +1068,7 @@ void __apicdebuginit print_local_APIC(void * dummy) printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", smp_processor_id(), hard_smp_processor_id()); - v = apic_read(APIC_ID); - printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); + printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id())); v = apic_read(APIC_LVR); printk(KERN_INFO "... 
APIC VERSION: %08x\n", v); ver = GET_APIC_VERSION(v); @@ -1263,7 +1262,7 @@ void disable_IO_APIC(void) entry.dest_mode = 0; /* Physical */ entry.delivery_mode = dest_ExtINT; /* ExtInt */ entry.vector = 0; - entry.dest = GET_APIC_ID(apic_read(APIC_ID)); + entry.dest = GET_APIC_ID(read_apic_id()); /* * Add it to the IO-APIC irq-routing table: diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index cd4522b3e90e..4b46a37e0634 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -802,7 +802,7 @@ void __init mp_register_lapic_address(u64 address) set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); } diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 5e789bdb34fa..29d2c40e54a2 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -631,7 +631,7 @@ void __init mp_register_lapic_address(u64 address) mp_lapic_addr = (unsigned long)address; set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); } void __cpuinit mp_register_lapic(u8 id, u8 enabled) { diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f45d740b1b6a..5da35d2cdbd8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -237,7 +237,7 @@ void __cpuinit smp_callin(void) /* * (This works even if the APIC is not enabled.) */ - phys_id = GET_APIC_ID(apic_read(APIC_ID)); + phys_id = GET_APIC_ID(read_apic_id()); cpuid = smp_processor_id(); if (cpu_isset(cpuid, cpu_callin_map)) { panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, @@ -1205,9 +1205,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) return; } - if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) { + if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { panic("Boot APIC ID in local APIC unexpected (%d vs %d)", - GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_physical_apicid); + GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); /* Or can we switch back to PIC here? 
*/ } diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 1f56e7d5bfdd..14217a970c5e 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -54,7 +54,7 @@ static inline void init_apic_ldr(void) static inline int apic_id_registered(void) { - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); + return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index 0137b6e142cc..fbc8ad256f5a 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h @@ -141,7 +141,7 @@ static inline void setup_portio_remap(void) extern unsigned int boot_cpu_physical_apicid; static inline int check_phys_apicid_present(int cpu_physical_apicid) { - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); return (1); } diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h index efac6f0d139f..a9ef33a8a995 100644 --- a/include/asm-x86/mach-visws/mach_apic.h +++ b/include/asm-x86/mach-visws/mach_apic.h @@ -23,7 +23,7 @@ static inline int apic_id_registered(void) { - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); + return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); } /* diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index bcbd25cbd863..c0d693ca4357 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,6 +174,11 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +static inline unsigned int read_apic_id(void) +{ + return *(u32 *)(APIC_BASE + APIC_ID); +} + # ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); # else @@ -181,7 +186,7 @@ extern int hard_smp_processor_id(void); static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); + return GET_APIC_ID(read_apic_id()); } # endif /* APIC_DEFINITION */ -- cgit v1.2.1 From ae261868658773538ddda829c50224e5851c2342 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:06 -0500 Subject: x86: add functions to determine if platform is a UV platform Add functions that can be used to determine if an x86_64 system is a SGI "UV" system. UV systems come in 3 types and are identified by the OEM ID in the MADT. 
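
For a usage sketch, this is how the SRAT parsing change later in this series
consumes the helper (adapted from that hunk into a small function; the
wrapper name is ours, the field accesses are from the patch):

        /*
         * On UV the ACPI affinity entry carries a 16-bit APICID split
         * across two 8-bit fields; everywhere else the plain id is used.
         */
        static int srat_apicid(struct acpi_srat_cpu_affinity *pa)
        {
                if (is_uv_system())
                        return (pa->apic_id << 8) | pa->local_sapic_eid;
                return pa->apic_id;
        }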
Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 4 +--- arch/x86/kernel/genapic_64.c | 25 +++++++++++++++++++++++++ include/asm-x86/genapic_32.h | 5 +++++ include/asm-x86/genapic_64.h | 5 +++++ 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e277c370246d..05878ac934db 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -56,9 +56,7 @@ EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 #include - -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } - +#include #else /* X86 */ diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 4ae7b6440260..c873f60c74a6 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -33,6 +33,8 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); struct genapic __read_mostly *genapic = &apic_flat; +static enum uv_system_type uv_system_type; + /* * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. */ @@ -64,3 +66,26 @@ void send_IPI_self(int vector) { __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); } + +int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) +{ + if (!strcmp(oem_id, "SGI")) { + if (!strcmp(oem_table_id, "UVL")) + uv_system_type = UV_LEGACY_APIC; + else if (!strcmp(oem_table_id, "UVX")) + uv_system_type = UV_X2APIC; + else if (!strcmp(oem_table_id, "UVH")) + uv_system_type = UV_NON_UNIQUE_APIC; + } + return 0; +} + +enum uv_system_type get_uv_system_type(void) +{ + return uv_system_type; +} + +int is_uv_system(void) +{ + return uv_system_type != UV_NONE; +} diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 5fa893dce729..f1b96932746b 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h @@ -114,4 +114,9 @@ struct genapic { extern struct genapic *genapic; +enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; +#define get_uv_system_type() UV_NONE +#define is_uv_system() 0 + + #endif diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index d7e516ccbaa4..914815c28ae5 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h @@ -33,5 +33,10 @@ extern struct genapic *genapic; extern struct genapic apic_flat; extern struct genapic apic_physflat; +extern int acpi_madt_oem_check(char *, char *); + +enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; +extern enum uv_system_type get_uv_system_type(void); +extern int is_uv_system(void); #endif -- cgit v1.2.1 From a65d1d644c2b65bfb99e766e7160d764b8b2bfa4 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:08 -0500 Subject: x86: increase size of APICID Increase the number of bits in an apicid from 8 to 32. By default, MP_processor_info() gets the APICID from the mpc_config_processor structure. However, this structure limits the size of APICID to 8 bits. This patch allows the caller of MP_processor_info() to optionally pass a larger APICID that will be used instead of the one in the mpc_config_processor struct. 
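
The widening is easiest to see in the limit constants; condensed from the
apicdef.h hunk below (64-bit values only, the 32-bit side keeps the old
8-bit assumptions):

        /* 64-bit: enough room for the 16-bit APICIDs used by UV */
        # define MAX_LOCAL_APIC 32768           /* was 256 */

        #ifdef CONFIG_X86_32
         #define BAD_APICID 0xFFu               /* old 8-bit sentinel */
        #else
         #define BAD_APICID 0xFFFFu             /* widened sentinel */
        #endif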
Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 4 ++-- arch/x86/kernel/mpparse_64.c | 4 ++-- arch/x86/mm/srat_64.c | 6 +++++- include/asm-x86/apicdef.h | 9 ++++++--- include/asm-x86/mpspec.h | 4 ++-- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 4b46a37e0634..7b7e008496e0 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -807,7 +807,7 @@ void __init mp_register_lapic_address(u64 address) Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); } -void __cpuinit mp_register_lapic (u8 id, u8 enabled) +void __cpuinit mp_register_lapic (int id, u8 enabled) { if (MAX_APICS - id <= 0) { printk(KERN_WARNING "Processor #%d invalid (max %d)\n", @@ -862,7 +862,7 @@ static u8 uniq_ioapic_id(u8 id) return id; } -void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) { int idx = 0; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 29d2c40e54a2..4840a846904e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -633,7 +633,7 @@ void __init mp_register_lapic_address(u64 address) if (boot_cpu_physical_apicid == -1U) boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); } -void __cpuinit mp_register_lapic(u8 id, u8 enabled) +void __cpuinit mp_register_lapic(int id, u8 enabled) { if (!enabled) { ++disabled_cpus; @@ -683,7 +683,7 @@ static u8 uniq_ioapic_id(u8 id) return find_first_zero_bit(used, 256); } -void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) { int idx = 0; diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 04e06c8226e3..1bae9c855ceb 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -20,6 +20,7 @@ #include #include #include +#include int acpi_numa __initdata; @@ -148,7 +149,10 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) return; } - apic_id = pa->apic_id; + if (is_uv_system()) + apic_id = (pa->apic_id << 8) | pa->local_sapic_eid; + else + apic_id = pa->apic_id; apicid_to_node[apic_id] = node; acpi_numa = 1; printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 8b244683431b..6b9008c78731 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h @@ -133,7 +133,7 @@ # define MAX_IO_APICS 64 #else # define MAX_IO_APICS 128 -# define MAX_LOCAL_APIC 256 +# define MAX_LOCAL_APIC 32768 #endif /* @@ -406,6 +406,9 @@ struct local_apic { #undef u32 -#define BAD_APICID 0xFFu - +#ifdef CONFIG_X86_32 + #define BAD_APICID 0xFFu +#else + #define BAD_APICID 0xFFFFu +#endif #endif diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 31bac12a97d1..1f6445b147f8 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -47,9 +47,9 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic(u8 id, u8 enabled); +extern void mp_register_lapic(int id, u8 enabled); extern void mp_register_lapic_address(u64 address); -extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); +extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs(void); -- cgit v1.2.1 From 
ac049c1db72963e19b29b63c42ab8759384eef20 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:09 -0500 Subject: x86: parsing for ACPI "SAPIC" table Add kernel support for new ACPI "sapic" tables that contain 16-bit APICIDs. This patch simply adds parsing of an optional SAPIC table if present. Otherwise, the traditional local APIC table is used. Note: the SAPIC table is not a new ACPI table - it exists on other architectures but is not currently recognized by x86_64. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 05878ac934db..b33ebf6ea4f1 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -264,6 +264,24 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) return 0; } +static int __init +acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_local_sapic *processor = NULL; + + processor = (struct acpi_madt_local_sapic *)header; + + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(header); + + mp_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */ + + return 0; +} + static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) @@ -757,8 +775,12 @@ static int __init acpi_parse_madt_lapic_entries(void) mp_register_lapic_address(acpi_lapic_addr); - count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, - MAX_APICS); + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, + acpi_parse_sapic, MAX_APICS); + + if (!count) + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, + acpi_parse_lapic, MAX_APICS); if (!count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); /* TBD: Cleanup to allow fallback to MPS */ -- cgit v1.2.1 From 0d3e865b2644e4a2250ab25c5475a0cd0d514b7e Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:11 -0500 Subject: x86: add UV specific header for MMR definitions Definitions of UV MMRs. Note: this file is auto-generated by hardware design tools. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/uv/uv_mmrs.h | 373 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 include/asm-x86/uv/uv_mmrs.h diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h new file mode 100644 index 000000000000..3b69fe6b6376 --- /dev/null +++ b/include/asm-x86/uv/uv_mmrs.h @@ -0,0 +1,373 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV MMR definitions + * + * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#ifndef __ASM_X86_UV_MMRS__ +#define __ASM_X86_UV_MMRS__ + +/* + * AUTO GENERATED - Do not edit + */ + + #define UV_MMR_ENABLE (1UL << 63) + +/* ========================================================================= */ +/* UVH_IPI_INT */ +/* ========================================================================= */ +#define UVH_IPI_INT 0x60500UL +#define UVH_IPI_INT_32 0x0360 + +#define UVH_IPI_INT_VECTOR_SHFT 0 +#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL +#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 +#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL +#define UVH_IPI_INT_DESTMODE_SHFT 11 +#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL +#define UVH_IPI_INT_APIC_ID_SHFT 16 +#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL +#define UVH_IPI_INT_SEND_SHFT 63 +#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL + +union uvh_ipi_int_u { + unsigned long v; + struct uvh_ipi_int_s { + unsigned long vector_ : 8; /* RW */ + unsigned long delivery_mode : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long rsvd_12_15 : 4; /* */ + unsigned long apic_id : 32; /* RW */ + unsigned long rsvd_48_62 : 15; /* */ + unsigned long send : 1; /* WP */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL + +union uvh_lb_bau_intd_payload_queue_first_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_first_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_48: 6; /* */ + unsigned long node_id : 14; /* RW */ + unsigned long rsvd_63 : 1; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL + +union uvh_lb_bau_intd_payload_queue_last_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_last_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_63: 21; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00 + +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 +#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL + +union uvh_lb_bau_intd_payload_queue_tail_u { + unsigned long v; + struct uvh_lb_bau_intd_payload_queue_tail_s { + unsigned long rsvd_0_3: 4; /* */ + unsigned long address : 39; /* RW */ + unsigned long rsvd_43_63: 21; /* */ + } s; +}; + +/* 
========================================================================= */ +/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL + +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL +union uvh_lb_bau_intd_software_acknowledge_u { + unsigned long v; + struct uvh_lb_bau_intd_software_acknowledge_s { + unsigned long pending_0 : 1; /* RW, W1C */ + unsigned long pending_1 : 1; /* RW, W1C */ + unsigned long pending_2 : 1; /* RW, W1C */ + unsigned long pending_3 : 1; /* RW, W1C */ + unsigned long pending_4 : 1; /* RW, W1C */ + unsigned long pending_5 : 1; /* RW, W1C */ + unsigned long pending_6 : 1; /* RW, W1C */ + unsigned long pending_7 : 1; /* RW, W1C */ + unsigned long timeout_0 : 1; /* RW, W1C */ + unsigned long timeout_1 : 1; /* RW, W1C */ + unsigned long timeout_2 : 1; /* RW, W1C */ + unsigned long timeout_3 : 1; /* RW, W1C */ + unsigned long timeout_4 : 1; /* RW, W1C */ + unsigned long timeout_5 : 1; /* RW, W1C */ + unsigned long timeout_6 : 1; /* RW, W1C */ + unsigned long timeout_7 : 1; /* RW, W1C */ + unsigned long rsvd_16_63: 48; /* */ + } s; +}; + +/* ========================================================================= 
*/ +/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ +/* ========================================================================= */ +#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8 + +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 +#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL + +union uvh_lb_bau_sb_activation_control_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_control_s { + unsigned long index : 6; /* RW */ + unsigned long rsvd_6_61: 56; /* */ + unsigned long push : 1; /* WP */ + unsigned long init : 1; /* WP */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0 + +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL + +union uvh_lb_bau_sb_activation_status_0_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_status_0_s { + unsigned long status : 64; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8 + +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 +#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL + +union uvh_lb_bau_sb_activation_status_1_u { + unsigned long v; + struct uvh_lb_bau_sb_activation_status_1_s { + unsigned long status : 64; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ +/* ========================================================================= */ +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0 + +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 +#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL + +union uvh_lb_bau_sb_descriptor_base_u { + unsigned long v; + struct uvh_lb_bau_sb_descriptor_base_s { + unsigned long rsvd_0_11 : 12; /* */ + unsigned long page_address : 31; /* RW */ + unsigned long rsvd_43_48 : 6; /* */ + unsigned long node_id : 14; /* RW */ + unsigned long rsvd_63 : 1; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_NODE_ID */ +/* ========================================================================= */ +#define UVH_NODE_ID 0x0UL + +#define UVH_NODE_ID_FORCE1_SHFT 0 +#define UVH_NODE_ID_FORCE1_MASK 
0x0000000000000001UL +#define UVH_NODE_ID_MANUFACTURER_SHFT 1 +#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL +#define UVH_NODE_ID_PART_NUMBER_SHFT 12 +#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL +#define UVH_NODE_ID_REVISION_SHFT 28 +#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL +#define UVH_NODE_ID_NODE_ID_SHFT 32 +#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL +#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48 +#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL +#define UVH_NODE_ID_NI_PORT_SHFT 56 +#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL + +union uvh_node_id_u { + unsigned long v; + struct uvh_node_id_s { + unsigned long force1 : 1; /* RO */ + unsigned long manufacturer : 11; /* RO */ + unsigned long part_number : 16; /* RO */ + unsigned long revision : 4; /* RO */ + unsigned long node_id : 15; /* RW */ + unsigned long rsvd_47 : 1; /* */ + unsigned long nodes_per_bit : 7; /* RW */ + unsigned long rsvd_55 : 1; /* */ + unsigned long ni_port : 4; /* RO */ + unsigned long rsvd_60_63 : 4; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL + +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_gru_overlay_config_mmr_u { + unsigned long v; + struct uvh_rh_gam_gru_overlay_config_mmr_s { + unsigned long rsvd_0_27: 28; /* */ + unsigned long base : 18; /* RW */ + unsigned long gr4 : 1; /* RW */ + unsigned long rsvd_47_51: 5; /* */ + unsigned long n_gru : 4; /* RW */ + unsigned long rsvd_56_62: 7; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ +/* ========================================================================= */ +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL + +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL + +union uvh_rh_gam_mmr_overlay_config_mmr_u { + unsigned long v; + struct uvh_rh_gam_mmr_overlay_config_mmr_s { + unsigned long rsvd_0_25: 26; /* */ + unsigned long base : 20; /* RW */ + unsigned long dual_hub : 1; /* RW */ + unsigned long rsvd_47_62: 16; /* */ + unsigned long enable : 1; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_RTC */ +/* ========================================================================= */ +#define UVH_RTC 0x28000UL + +#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 +#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL + +union uvh_rtc_u { + unsigned 
long v; + struct uvh_rtc_s { + unsigned long real_time_clock : 56; /* RW */ + unsigned long rsvd_56_63 : 8; /* */ + } s; +}; + +/* ========================================================================= */ +/* UVH_SI_ADDR_MAP_CONFIG */ +/* ========================================================================= */ +#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL + +#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 +#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL +#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 +#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL + +union uvh_si_addr_map_config_u { + unsigned long v; + struct uvh_si_addr_map_config_s { + unsigned long m_skt : 6; /* RW */ + unsigned long rsvd_6_7: 2; /* */ + unsigned long n_skt : 4; /* RW */ + unsigned long rsvd_12_63: 52; /* */ + } s; +}; + + +#endif /* __ASM_X86_UV_MMRS__ */ -- cgit v1.2.1 From 952cf6d7ae52cc5423baa57e978e20e732a89ba6 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 28 Mar 2008 14:12:13 -0500 Subject: x86: define the macros and tables for the basic UV infrastructure. Define the macros and tables for the basic UV infrastructure. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- include/asm-x86/uv/uv_hub.h | 210 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 include/asm-x86/uv/uv_hub.h diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h new file mode 100644 index 000000000000..b4fcf9cf8951 --- /dev/null +++ b/include/asm-x86/uv/uv_hub.h @@ -0,0 +1,210 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV architectural definitions + * + * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef __ASM_X86_UV_HUB_H__ +#define __ASM_X86_UV_HUB_H__ + +#include +#include +#include +#include + + +/* + * Addressing Terminology + * + * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of + * routers always have low bit of 1, C/MBricks have low bit + * equal to 0. Most addressing macros that target UV hub chips + * right shift the NASID by 1 to exclude the always-zero bit. + * + * SNASID - NASID right shifted by 1 bit. + * + * + * Memory/UV-HUB Processor Socket Address Format: + * +--------+---------------+---------------------+ + * |00..0000| SNASID | NodeOffset | + * +--------+---------------+---------------------+ + * <--- N bits --->|<--------M bits -----> + * + * M number of node offset bits (35 .. 40) + * N number of SNASID bits (0 .. 10) + * + * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). + * The actual values are configuration dependent and are set at + * boot time + * + * APICID format + * NOTE!!!!!! This is the current format of the APICID. However, code + * should assume that this will change in the future. Use functions + * in this file for all APICID bit manipulations and conversion. + * + * 1111110000000000 + * 5432109876543210 + * nnnnnnnnnnlc0cch + * sssssssssss + * + * n = snasid bits + * l = socket number on board + * c = core + * h = hyperthread + * s = bits that are in the socket CSR + * + * Note: Processor only supports 12 bits in the APICID register. The ACPI + * tables hold all 16 bits. Software needs to be aware of this. + * + * Unless otherwise specified, all references to APICID refer to + * the FULL value contained in ACPI tables, not the subset in the + * processor APICID register. 
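+ *
+ * Worked example (illustrative only, since the layout above is subject
+ * to change): apicid 0x141 decodes as h=1, c=0, l=0 with snasid bits
+ * 101b, i.e. snasid 5 and NASID 10 after shifting back up by 1 bit.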
+ */ + + +/* + * Maximum number of bricks in all partitions and in all coherency domains. + * This is the total number of bricks accessible in the numalink fabric. It + * includes all C & M bricks. Routers are NOT included. + * + * This value is also the value of the maximum number of non-router NASIDs + * in the numalink fabric. + * + * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. + */ +#define UV_MAX_NUMALINK_BLADES 16384 + +/* + * Maximum number of C/Mbricks within a software SSI (hardware may support + * more). + */ +#define UV_MAX_SSI_BLADES 256 + +/* + * The largest possible NASID of a C or M brick (+ 2) + */ +#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2) + +/* + * The following defines attributes of the HUB chip. These attributes are + * frequently referenced and are kept in the per-cpu data areas of each cpu. + * They are kept together in a struct to minimize cache misses. + */ +struct uv_hub_info_s { + unsigned long global_mmr_base; + unsigned short local_nasid; + unsigned short gnode_upper; + unsigned short coherency_domain_number; + unsigned short numa_blade_id; + unsigned char blade_processor_id; + unsigned char m_val; + unsigned char n_val; +}; +DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); +#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) +#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) + +/* + * Local & Global MMR space macros. + * Note: macros are intended to be used ONLY by inline functions + * in this file - not by other kernel code. + */ +#define UV_SNASID(n) ((n) >> 1) +#define UV_NASID(n) ((n) << 1) + +#define UV_LOCAL_MMR_BASE 0xf4000000UL +#define UV_GLOBAL_MMR32_BASE 0xf8000000UL +#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) + +#define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff +#define UV_GLOBAL_MMR32_SNASID_SHIFT 15 +#define UV_GLOBAL_MMR64_SNASID_SHIFT 26 + +#define UV_GLOBAL_MMR32_NASID_BITS(n) \ + (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \ + (UV_GLOBAL_MMR32_SNASID_SHIFT)) + +#define UV_GLOBAL_MMR64_NASID_BITS(n) \ + ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) + +#define UV_APIC_NASID_SHIFT 6 + +/* + * Extract a NASID from an APICID (full apicid, not processor subset) + */ +static inline int uv_apicid_to_nasid(int apicid) +{ + return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); +} + +/* + * Access global MMRs using the low memory MMR32 space. This region supports + * faster MMR access but not all MMRs are accessible in this space. + */ +static inline unsigned long *uv_global_mmr32_address(int nasid, + unsigned long offset) +{ + return __va(UV_GLOBAL_MMR32_BASE | + UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); +} + +static inline void uv_write_global_mmr32(int nasid, unsigned long offset, + unsigned long val) +{ + *uv_global_mmr32_address(nasid, offset) = val; +} + +static inline unsigned long uv_read_global_mmr32(int nasid, + unsigned long offset) +{ + return *uv_global_mmr32_address(nasid, offset); +} + +/* + * Access Global MMR space using the MMR space located at the top of physical + * memory. 
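+ *
+ * For example (illustrative numbers): nasid 10 is snasid 5, so offset
+ * 0x30 on that node maps to UV_GLOBAL_MMR64_BASE | (5UL << 26) | 0x30,
+ * per UV_GLOBAL_MMR64_SNASID_SHIFT.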
+ */
+static inline unsigned long *uv_global_mmr64_address(int nasid,
+				unsigned long offset)
+{
+	return __va(UV_GLOBAL_MMR64_BASE |
+		UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset);
+}
+
+static inline void uv_write_global_mmr64(int nasid, unsigned long offset,
+				unsigned long val)
+{
+	*uv_global_mmr64_address(nasid, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr64(int nasid,
+				unsigned long offset)
+{
+	return *uv_global_mmr64_address(nasid, offset);
+}
+
+/*
+ * Access node local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+	return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+	return *uv_local_mmr_address(offset);
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+	*uv_local_mmr_address(offset) = val;
+}
+
+#endif /* __ASM_X86_UV_HUB__ */
+
-- cgit v1.2.1


From 8400def8252f90ecd056657c0bac806afadd8511 Mon Sep 17 00:00:00 2001
From: Jack Steiner
Date: Fri, 28 Mar 2008 14:12:14 -0500
Subject: x86: define the macros and tables for blade functions

Add UV macros for converting between cpu numbers, blade numbers
and node numbers. Note that these are used ONLY within x86_64 UV
modules, and are not for general kernel use.

Signed-off-by: Jack Steiner
Signed-off-by: Ingo Molnar
---
 include/asm-x86/uv/uv_hub.h | 74 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)

diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
index b4fcf9cf8951..26b9240d1e23 100644
--- a/include/asm-x86/uv/uv_hub.h
+++ b/include/asm-x86/uv/uv_hub.h
@@ -206,5 +206,79 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 	*uv_local_mmr_address(offset) = val;
 }

+/*
+ * Structures and definitions for converting between cpu, node, and blade
+ * numbers.
+ */
+struct uv_blade_info {
+	unsigned short	nr_posible_cpus;
+	unsigned short	nr_online_cpus;
+	unsigned short	nasid;
+};
+extern struct uv_blade_info *uv_blade_info;
+extern short *uv_node_to_blade;
+extern short *uv_cpu_to_blade;
+extern short uv_possible_blades;
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+	return uv_hub_info->blade_processor_id;
+}
+
+/* Blade number of current cpu. Numbered 0 ..
<#blades -1> */
+static inline int uv_numa_blade_id(void)
+{
+	return uv_hub_info->numa_blade_id;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+	return uv_cpu_to_blade[cpu];
+}
+
+/* Convert a Linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+	return uv_node_to_blade[nid];
+}
+
+/* Convert a blade id to the NASID of the blade */
+static inline int uv_blade_to_nasid(int bid)
+{
+	return uv_blade_info[bid].nasid;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+	return uv_blade_info[bid].nr_posible_cpus;
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+	return uv_blade_info[bid].nr_online_cpus;
+}
+
+/* Convert a cpu id to the NASID of the blade containing the cpu */
+static inline int uv_cpu_to_nasid(int cpu)
+{
+	return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid;
+}
+
+/* Convert a node number to the NASID of the blade */
+static inline int uv_node_to_nasid(int nid)
+{
+	return uv_blade_info[uv_node_to_blade_id(nid)].nasid;
+}
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+	return uv_possible_blades;
+}
+
 #endif /* __ASM_X86_UV_HUB__ */
-- cgit v1.2.1


From 570da318cf0e3053e62030253494c410a18d4be7 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 8 Apr 2008 12:20:50 +0200
Subject: x86: support for new UV apic, prepare

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/genapic_64.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index c873f60c74a6..4cc1c218ae4c 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -25,9 +25,11 @@
 #endif

 /* which logical CPU number maps to which CPU (physical APIC ID) */
+#ifdef CONFIG_SMP
 u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata
 	= { [0 ... NR_CPUS-1] = BAD_APICID };
 void *x86_cpu_to_apicid_early_ptr;
+#endif

 DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-- cgit v1.2.1


From ac23d4ee3f84de33c16ed7e68f9adee2386e74fb Mon Sep 17 00:00:00 2001
From: Jack Steiner
Date: Fri, 28 Mar 2008 14:12:16 -0500
Subject: x86: support for new UV apic

UV supports really big systems. So big, in fact, that the APICID
register does not contain enough bits to hold an APICID that is
unique across all cpus.

The UV BIOS supports 3 APICID modes:

  - legacy mode. This mode uses the old APIC mode where APICID is
    in bits [31:24] of the APICID register.

  - x2apic mode. This mode is whitebox-compatible. APICIDs are
    unique across all cpus. Standard x2apic APIC operations
    (Intel-defined) can be used for IPIs. The node identifier fits
    within the Intel-defined portion of the APICID register.

  - x2apic-uv mode. In this mode, the APICIDs on each node are
    unique, but IDs on different nodes are not unique. For example,
    if each node has 32 cpus, the APICIDs on each node might be
    0 - 31. Every node has the same set of IDs. The UV hub is used
    to route IPIs/interrupts to the correct node. Traditional APIC
    operations WILL NOT WORK.

In x2apic-uv mode, the ACPI tables all contain a full unique ID (note:
exact bit layout still changing but the following is close):

	nnnnnnnnnnlc0cch
	n = unique node number
	l = socket number on board
	c = core
	h = hyperthread

Only the "lc0cch" bits are written to the APICID register.
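(For instance, a cpu whose full ACPI-table ID were the illustrative value 0x141 would have only the low "lc0cch" bits, 0x01, in its APICID register.)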
The remaining bits are supplied by having the get_apic_id() function "OR" the extra bits into the value read from the APICID register. (Hmmm.. why not keep the ENTIRE APICID register in per-cpu data....) The x2apic-uv mode is recognized by the MADT table containing: oem_id = "SGI" oem_table_id = "UV-X" Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/apic_64.c | 2 + arch/x86/kernel/genapic_64.c | 18 +++ arch/x86/kernel/genx2apic_uv_x.c | 245 +++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/setup64.c | 4 + arch/x86/kernel/smpboot.c | 5 + include/asm-x86/genapic_64.h | 5 + include/asm-x86/smp.h | 5 + 8 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 arch/x86/kernel/genx2apic_uv_x.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 1fe841a86f7e..0bf2fb55aa74 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -89,7 +89,7 @@ scx200-y += scx200_32.o ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) - obj-y += genapic_64.o genapic_flat_64.o + obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o obj-$(CONFIG_AUDIT) += audit_64.o diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 9b4cacdfd74f..8b0fad47a5d2 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -738,6 +738,7 @@ void __cpuinit setup_local_APIC(void) unsigned int value; int i, j; + preempt_disable(); value = apic_read(APIC_LVR); BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f); @@ -831,6 +832,7 @@ void __cpuinit setup_local_APIC(void) else value = APIC_DM_NMI | APIC_LVT_MASKED; apic_write(APIC_LVT1, value); + preempt_enable(); } void __cpuinit lapic_setup_esr(void) diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 4cc1c218ae4c..910a4a777a4c 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,7 @@ void *x86_cpu_to_apicid_early_ptr; #endif DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); +DEFINE_PER_CPU(int, x2apic_extra_bits); struct genapic __read_mostly *genapic = &apic_flat; @@ -42,6 +44,9 @@ static enum uv_system_type uv_system_type; */ void __init setup_apic_routing(void) { + if (uv_system_type == UV_NON_UNIQUE_APIC) + genapic = &apic_x2apic_uv_x; + else #ifdef CONFIG_ACPI /* * Quirk: some x86_64 machines can only use physical APIC mode @@ -82,6 +87,19 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 0; } +unsigned int read_apic_id(void) +{ + unsigned int id; + + WARN_ON(preemptible()); + id = apic_read(APIC_ID); + if (uv_system_type >= UV_X2APIC) + id |= __get_cpu_var(x2apic_extra_bits); + else + id = (id >> 24) & 0xFFu;; + return id; +} + enum uv_system_type get_uv_system_type(void) { return uv_system_type; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c new file mode 100644 index 000000000000..5d77c9cd8e15 --- /dev/null +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -0,0 +1,245 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI UV APIC functions (note: not an Intel compatible APIC) + * + * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. 
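+ *
+ * In x2apic-uv mode the hub, not the local APIC, routes IPIs between
+ * nodes, which is why the IPI helpers below write UVH_IPI_INT through
+ * uv_write_global_mmr64() instead of using standard APIC operations.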
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); +EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); + +struct uv_blade_info *uv_blade_info; +EXPORT_SYMBOL_GPL(uv_blade_info); + +short *uv_node_to_blade; +EXPORT_SYMBOL_GPL(uv_node_to_blade); + +short *uv_cpu_to_blade; +EXPORT_SYMBOL_GPL(uv_cpu_to_blade); + +short uv_possible_blades; +EXPORT_SYMBOL_GPL(uv_possible_blades); + +/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ + +static cpumask_t uv_target_cpus(void) +{ + return cpumask_of_cpu(0); +} + +static cpumask_t uv_vector_allocation_domain(int cpu) +{ + cpumask_t domain = CPU_MASK_NONE; + cpu_set(cpu, domain); + return domain; +} + +int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) +{ + unsigned long val; + int nasid; + + nasid = uv_apicid_to_nasid(phys_apicid); + val = (1UL << UVH_IPI_INT_SEND_SHFT) | + (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | + (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | + (6 << UVH_IPI_INT_DELIVERY_MODE_SHFT); + uv_write_global_mmr64(nasid, UVH_IPI_INT, val); + return 0; +} + +static void uv_send_IPI_one(int cpu, int vector) +{ + unsigned long val, apicid; + int nasid; + + apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ + nasid = uv_apicid_to_nasid(apicid); + val = + (1UL << UVH_IPI_INT_SEND_SHFT) | (apicid << + UVH_IPI_INT_APIC_ID_SHFT) | + (vector << UVH_IPI_INT_VECTOR_SHFT); + uv_write_global_mmr64(nasid, UVH_IPI_INT, val); + printk(KERN_DEBUG + "UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n", + cpu, apicid, vector, nasid, val); +} + +static void uv_send_IPI_mask(cpumask_t mask, int vector) +{ + unsigned int cpu; + + for (cpu = 0; cpu < NR_CPUS; ++cpu) + if (cpu_isset(cpu, mask)) + uv_send_IPI_one(cpu, vector); +} + +static void uv_send_IPI_allbutself(int vector) +{ + cpumask_t mask = cpu_online_map; + + cpu_clear(smp_processor_id(), mask); + + if (!cpus_empty(mask)) + uv_send_IPI_mask(mask, vector); +} + +static void uv_send_IPI_all(int vector) +{ + uv_send_IPI_mask(cpu_online_map, vector); +} + +static int uv_apic_id_registered(void) +{ + return 1; +} + +static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + cpu = first_cpu(cpumask); + if ((unsigned)cpu < NR_CPUS) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int phys_pkg_id(int index_msb) +{ + return GET_APIC_ID(read_apic_id()) >> index_msb; +} + +#ifdef ZZZ /* Needs x2apic patch */ +static void uv_send_IPI_self(int vector) +{ + apic_write(APIC_SELF_IPI, vector); +} +#endif + +struct genapic apic_x2apic_uv_x = { + .name = "UV large system", + .int_delivery_mode = dest_Fixed, + .int_dest_mode = (APIC_DEST_PHYSICAL != 0), + .target_cpus = uv_target_cpus, + .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ + .apic_id_registered = uv_apic_id_registered, + .send_IPI_all = uv_send_IPI_all, + .send_IPI_allbutself = uv_send_IPI_allbutself, + .send_IPI_mask = uv_send_IPI_mask, + /* ZZZ.send_IPI_self = uv_send_IPI_self, */ + .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, + .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ +}; + +static __cpuinit void set_x2apic_extra_bits(int nasid) +{ + __get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6); +} + +/* + * Called on boot cpu. 
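+ * (Reached lazily via the first uv_cpu_init() on the boot cpu; see below.)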
+ */ +static __init void uv_system_init(void) +{ + union uvh_si_addr_map_config_u m_n_config; + int bytes, nid, cpu, lcpu, nasid, last_nasid, blade; + unsigned long mmr_base; + + m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); + mmr_base = + uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & + ~UV_MMR_ENABLE; + printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); + + last_nasid = -1; + for_each_possible_cpu(cpu) { + nid = cpu_to_node(cpu); + nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); + if (nasid != last_nasid) + uv_possible_blades++; + last_nasid = nasid; + } + printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); + + bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); + uv_blade_info = alloc_bootmem_pages(bytes); + + bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); + uv_node_to_blade = alloc_bootmem_pages(bytes); + memset(uv_node_to_blade, 255, bytes); + + bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); + uv_cpu_to_blade = alloc_bootmem_pages(bytes); + memset(uv_cpu_to_blade, 255, bytes); + + last_nasid = -1; + blade = -1; + lcpu = -1; + for_each_possible_cpu(cpu) { + nid = cpu_to_node(cpu); + nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); + if (nasid != last_nasid) { + blade++; + lcpu = -1; + uv_blade_info[blade].nr_posible_cpus = 0; + uv_blade_info[blade].nr_online_cpus = 0; + } + last_nasid = nasid; + lcpu++; + + uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt; + uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt; + uv_cpu_hub_info(cpu)->numa_blade_id = blade; + uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; + uv_cpu_hub_info(cpu)->local_nasid = nasid; + uv_cpu_hub_info(cpu)->gnode_upper = + nasid & ~((1 << uv_hub_info->n_val) - 1); + uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; + uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ + uv_blade_info[blade].nasid = nasid; + uv_blade_info[blade].nr_posible_cpus++; + uv_node_to_blade[nid] = blade; + uv_cpu_to_blade[cpu] = blade; + + printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n", + cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid); + printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade); + } +} + +/* + * Called on each cpu to initialize the per_cpu UV data area. + */ +void __cpuinit uv_cpu_init(void) +{ + if (!uv_node_to_blade) + uv_system_init(); + + uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; + + if (get_uv_system_type() == UV_NON_UNIQUE_APIC) + set_x2apic_extra_bits(uv_hub_info->local_nasid); +} diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c index 6b4e3262e8cb..4be499cd6a0d 100644 --- a/arch/x86/kernel/setup64.c +++ b/arch/x86/kernel/setup64.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifndef CONFIG_DEBUG_BOOT_PARAMS struct boot_params __initdata boot_params; @@ -264,4 +265,7 @@ void __cpuinit cpu_init (void) fpu_init(); raw_local_save_flags(kernel_eflags); + + if (is_uv_system()) + uv_cpu_init(); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5da35d2cdbd8..22bf6c29454f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1101,6 +1101,7 @@ static __init void disable_smp(void) */ static int __init smp_sanity_check(unsigned max_cpus) { + preempt_disable(); if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { printk(KERN_WARNING "weird, boot CPU (#%d) not listed" "by the BIOS.\n", hard_smp_processor_id()); @@ -1112,6 +1113,7 @@ static int __init smp_sanity_check(unsigned max_cpus) * get out of here now! 
*/ if (!smp_found_config && !acpi_lapic) { + preempt_enable(); printk(KERN_NOTICE "SMP motherboard not detected.\n"); disable_smp(); if (APIC_init_uniprocessor()) @@ -1130,6 +1132,7 @@ static int __init smp_sanity_check(unsigned max_cpus) boot_cpu_physical_apicid); physid_set(hard_smp_processor_id(), phys_cpu_present_map); } + preempt_enable(); /* * If we couldn't find a local APIC, then get out of here now! @@ -1205,11 +1208,13 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) return; } + preempt_disable(); if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { panic("Boot APIC ID in local APIC unexpected (%d vs %d)", GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); /* Or can we switch back to PIC here? */ } + preempt_enable(); #ifdef CONFIG_X86_32 connect_bsp_APIC(); diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index 914815c28ae5..1de931b263ce 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h @@ -39,4 +39,9 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); +extern struct genapic apic_x2apic_uv_x; +DECLARE_PER_CPU(int, x2apic_extra_bits); +extern void uv_cpu_init(void); +extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); + #endif diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index c0d693ca4357..b35566264879 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,10 +174,15 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } +#ifdef CONFIG_X86_32_SMP static inline unsigned int read_apic_id(void) { return *(u32 *)(APIC_BASE + APIC_ID); } +#else +extern unsigned int read_apic_id(void); +#endif + # ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); -- cgit v1.2.1 From a24eae88ad3767d0a4a940a10e4a9cec849b7778 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 30 Mar 2008 12:17:12 +0200 Subject: x86: uv fix Signed-off-by: Ingo Molnar --- include/asm-x86/smp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index b35566264879..654724c58f5b 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -174,7 +174,7 @@ static inline int logical_smp_processor_id(void) return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } -#ifdef CONFIG_X86_32_SMP +#ifndef CONFIG_X86_64 static inline unsigned int read_apic_id(void) { return *(u32 *)(APIC_BASE + APIC_ID); -- cgit v1.2.1 From b447a468fcd130aa8951672b6115c673c274e888 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 25 Mar 2008 15:06:51 -0700 Subject: x86: clean up non-smp usage of cpu maps Cleanup references to the early cpu maps for the non-SMP configuration and remove some functions called for SMP configurations only. 
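The shape of the change, condensed from the topology.h hunks below into one sketch (illustrative, not an additional hunk): the early-boot arrays only exist on SMP, so UP builds see NULL or passthrough stand-ins and shared code can test the pointers unconditionally.

	#ifdef CONFIG_SMP
	extern int x86_cpu_to_node_map_init[];
	extern void *x86_cpu_to_node_map_early_ptr;
	#else
	/* UP builds get inert stand-ins so common code needs no #ifdefs */
	#define x86_cpu_to_node_map_early_ptr	NULL
	#define early_cpu_to_node(cpu)		cpu_to_node(cpu)
	#endif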
Cc: Andi Kleen Cc: Christoph Lameter Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 28 +++++++++++----------------- arch/x86/mm/numa_64.c | 4 +++- include/asm-x86/smp.h | 5 +++++ include/asm-x86/topology.h | 15 +++++++++++---- 4 files changed, 30 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1179aa06cdbf..dc7940955b7a 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -10,7 +10,7 @@ #include #include -#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP) /* * Copy data used in early init routines from the initial arrays to the * per cpu data areas. These arrays then become expendable and the @@ -21,21 +21,12 @@ static void __init setup_per_cpu_maps(void) int cpu; for_each_possible_cpu(cpu) { -#ifdef CONFIG_SMP - if (per_cpu_offset(cpu)) { -#endif - per_cpu(x86_cpu_to_apicid, cpu) = - x86_cpu_to_apicid_init[cpu]; - per_cpu(x86_bios_cpu_apicid, cpu) = + per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu]; + per_cpu(x86_bios_cpu_apicid, cpu) = x86_bios_cpu_apicid_init[cpu]; #ifdef CONFIG_NUMA - per_cpu(x86_cpu_to_node_map, cpu) = + per_cpu(x86_cpu_to_node_map, cpu) = x86_cpu_to_node_map_init[cpu]; -#endif -#ifdef CONFIG_SMP - } else - printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", - cpu); #endif } @@ -72,17 +63,20 @@ void __init setup_per_cpu_areas(void) /* Copy section for each CPU (we discard the original) */ size = PERCPU_ENOUGH_ROOM; - printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); - for_each_cpu_mask(i, cpu_possible_map) { + + for_each_possible_cpu(i) { char *ptr; #ifndef CONFIG_NEED_MULTIPLE_NODES ptr = alloc_bootmem_pages(size); #else int node = early_cpu_to_node(i); - if (!node_online(node) || !NODE_DATA(node)) + if (!node_online(node) || !NODE_DATA(node)) { ptr = alloc_bootmem_pages(size); + printk(KERN_INFO + "cpu %d has no node or node-local memory\n", i); + } else ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); #endif @@ -96,7 +90,7 @@ void __init setup_per_cpu_areas(void) memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); } - /* setup percpu data maps early */ + /* Setup percpu data maps */ setup_per_cpu_maps(); } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 18267a02e67a..2ea56f48f29b 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -31,13 +31,15 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES]; struct memnode memnode; +#ifdef CONFIG_SMP int x86_cpu_to_node_map_init[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; void *x86_cpu_to_node_map_early_ptr; +EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr); +#endif DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map); -EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr); s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 654724c58f5b..d973c11688ce 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -29,10 +29,15 @@ extern int smp_num_siblings; extern unsigned int num_processors; extern cpumask_t cpu_initialized; +#ifdef CONFIG_SMP extern u16 x86_cpu_to_apicid_init[]; extern u16 x86_bios_cpu_apicid_init[]; extern void *x86_cpu_to_apicid_early_ptr; extern void *x86_bios_cpu_apicid_early_ptr; +#else +#define x86_cpu_to_apicid_early_ptr NULL +#define x86_bios_cpu_apicid_early_ptr NULL +#endif DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8d1a1f3d21b4..81a29eb08ac4 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h @@ -38,8 +38,13 @@ extern int cpu_to_node_map[]; #endif DECLARE_PER_CPU(int, x86_cpu_to_node_map); + +#ifdef CONFIG_SMP extern int x86_cpu_to_node_map_init[]; extern void *x86_cpu_to_node_map_early_ptr; +#else +#define x86_cpu_to_node_map_early_ptr NULL +#endif extern cpumask_t node_to_cpumask_map[]; @@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu) } #else /* CONFIG_X86_64 */ + +#ifdef CONFIG_SMP static inline int early_cpu_to_node(int cpu) { int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; @@ -65,6 +72,9 @@ static inline int early_cpu_to_node(int cpu) else return NUMA_NO_NODE; } +#else +#define early_cpu_to_node(cpu) cpu_to_node(cpu) +#endif static inline int cpu_to_node(int cpu) { @@ -76,10 +86,7 @@ static inline int cpu_to_node(int cpu) return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; } #endif - if (per_cpu_offset(cpu)) - return per_cpu(x86_cpu_to_node_map, cpu); - else - return NUMA_NO_NODE; + return per_cpu(x86_cpu_to_node_map, cpu); } #endif /* CONFIG_X86_64 */ -- cgit v1.2.1 From 1725037f7232c1518b9be1832f5823b7c576c35c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 31 Mar 2008 14:52:15 +0200 Subject: x86: set_cyc2ns_scale() remove prev scale Peter Zijlstra pointed out that it's unused. 
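For reference, the scale that stays behind converts TSC cycles to nanoseconds in fixed point, roughly as below (a sketch assuming the usual CYC2NS_SCALE_FACTOR fixed-point convention; it is not code from this patch):

	/*
	 * scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz; e.g. a
	 * 2 GHz cpu (cpu_khz = 2000000) with CYC2NS_SCALE_FACTOR = 10 gets
	 * scale = 512, so 2000 cycles -> (2000 * 512) >> 10 = 1000 ns.
	 */
	static inline unsigned long long cycles_to_ns(unsigned long long cyc,
						      unsigned long scale)
	{
		return (cyc * scale) >> CYC2NS_SCALE_FACTOR;
	}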
Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc_32.c | 3 +-- arch/x86/kernel/tsc_64.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index 68657d8526fb..3d7e6e9fa6c2 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c @@ -84,8 +84,8 @@ DEFINE_PER_CPU(unsigned long, cyc2ns); static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) { - unsigned long flags, prev_scale, *scale; unsigned long long tsc_now, ns_now; + unsigned long flags, *scale; local_irq_save(flags); sched_clock_idle_sleep_event(); @@ -95,7 +95,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) rdtscll(tsc_now); ns_now = __cycles_2_ns(tsc_now); - prev_scale = *scale; if (cpu_khz) *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index d3bebaaad842..ceeba01e7f47 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c @@ -44,8 +44,8 @@ DEFINE_PER_CPU(unsigned long, cyc2ns); static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) { - unsigned long flags, prev_scale, *scale; unsigned long long tsc_now, ns_now; + unsigned long flags, *scale; local_irq_save(flags); sched_clock_idle_sleep_event(); @@ -55,7 +55,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) rdtscll(tsc_now); ns_now = __cycles_2_ns(tsc_now); - prev_scale = *scale; if (cpu_khz) *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; -- cgit v1.2.1 From f5149a49f994e5c469ac398af7cdeb8eb612d3a4 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Sun, 30 Mar 2008 21:02:07 -0500 Subject: x86: support for new UV apic, fix Yinghai Lu pointed out a bug in the previous patches, fix double-shift of apicid. Signed-off-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/genapic_64.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 910a4a777a4c..c9eabe36ee36 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -95,8 +95,6 @@ unsigned int read_apic_id(void) id = apic_read(APIC_ID); if (uv_system_type >= UV_X2APIC) id |= __get_cpu_var(x2apic_extra_bits); - else - id = (id >> 24) & 0xFFu;; return id; } -- cgit v1.2.1 From 13af4836b3914b23946f6a8982934e2c828c183f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 2 Apr 2008 13:23:22 +0200 Subject: x86: improve default idle Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 8 -------- arch/x86/kernel/process_64.c | 8 -------- 2 files changed, 16 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 08c41ed5e805..3903a8f2eb97 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -113,16 +113,8 @@ void default_idle(void) local_irq_disable(); if (!need_resched()) { - ktime_t t0, t1; - u64 t0n, t1n; - - t0 = ktime_get(); - t0n = ktime_to_ns(t0); safe_halt(); /* enables interrupts racelessly */ local_irq_disable(); - t1 = ktime_get(); - t1n = ktime_to_ns(t1); - sched_clock_idle_wakeup_event(t1n - t0n); } local_irq_enable(); current_thread_info()->status |= TS_POLLING; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4f40272474dd..e75ccc8a2b87 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -107,16 +107,8 @@ void default_idle(void) smp_mb(); local_irq_disable(); if (!need_resched()) { - ktime_t t0, t1; - u64 t0n, t1n; - - t0 = ktime_get(); - t0n = ktime_to_ns(t0); safe_halt(); /* 
enables interrupts racelessly */
 		local_irq_disable();
-		t1 = ktime_get();
-		t1n = ktime_to_ns(t1);
-		sched_clock_idle_wakeup_event(t1n - t0n);
 	}
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
-- cgit v1.2.1


From 19b4e7f4e9b1c88459cf2c9b9ccaa09cb8bf854d Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 10 Apr 2008 10:12:27 +0200
Subject: x86: extend the scheduled bzImage symlinks removal

use of the bzImage symlinks in developer scripts is still widespread,
so let's extend the removal period by 2 years. These symlinks cost us
near nothing.

Signed-off-by: Ingo Molnar
---
 Documentation/feature-removal-schedule.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index bf0e3df8e7a1..164c89394cff 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -212,7 +212,7 @@ Who:	Stephen Hemminger
 ---------------------------

 What:	i386/x86_64 bzImage symlinks
-When:	April 2008
+When:	April 2010

 Why:	The i386/x86_64 merge provides a symlink to the old bzImage
 	location so not yet updated user space tools, e.g. package
-- cgit v1.2.1


From 431ef7a2a486201967304fcc9cfc33e945626fed Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Tue, 1 Apr 2008 19:41:50 +0400
Subject: x86: debug Store - call kfree only if we really need it

We should only call kfree if we really need it. Though it's safe to
call kfree with a NULL pointer, in this code we've already tested the
pointer and can eliminate the call.

Signed-off-by: Cyrill Gorcunov
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/ds.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index dcd918c1580d..11c11b8ec48d 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -220,11 +220,11 @@ int ds_allocate(void **dsp, size_t bts_size_in_bytes)

 int ds_free(void **dsp)
 {
-	if (*dsp)
+	if (*dsp) {
 		kfree((void *)get_bts_buffer_base(*dsp));
-	kfree(*dsp);
-	*dsp = NULL;
-
+		kfree(*dsp);
+		*dsp = NULL;
+	}
 	return 0;
 }
-- cgit v1.2.1


From d2b3bab63bd9999a29eb74326a7baf61901385e6 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Thu, 3 Apr 2008 23:48:29 +0100
Subject: x86: MPSC should use P6 NOPs

I've now noticed that the machine I call MPENTIUM4 for 32-bit kernels
is called MPSC for 64-bit kernels, and in that case it still doesn't
get the P6 NOPs it ought to. hpa explains that MK8 should still be
excluded, so it's just a matter of including MPSC along with
MPENTIUM4.

Signed-off-by: Hugh Dickins
Acked-by: H.
Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 9304bfba7d45..57072f2716f9 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -388,7 +388,7 @@ config X86_OOSTORE # config X86_P6_NOP def_bool y - depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4) + depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC) config X86_TSC def_bool y -- cgit v1.2.1 From f408b43ceedce49f26c01cd4a68dbbdbe2743e51 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 1 Apr 2008 15:44:01 -0700 Subject: x86: fix VisualWS and Voyager kexec build failures without this patch: VOYAGER: kernel/built-in.o: In function `crash_kexec': (.text+0x28588): undefined reference to `machine_crash_shutdown' VISWS: kernel/built-in.o: In function `crash_kexec': /next-20080401/kernel/kexec.c:1074: undefined reference to `machine_crash_shutdown' make[1]: *** [.tmp_vmlinux1] Error 1 because arch/x86/kernel/reboot.c isn't built since CONFIG_X86_BIOS_REBOOT=n, so machine_crash_shutdown() isn't available. This patch does seem a small bit odd since the KEXEC help text says that kexec is independent of the system firmware. Signed-off-by: Randy Dunlap Cc: Eric Biederman Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a0d7406e8b37..0a7193ae45ed 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1121,6 +1121,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" + depends on X86_64 || X86_BIOS_REBOOT help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot -- cgit v1.2.1 From d61ecf0b53131564949bc4196e70f676000a845a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 4 Apr 2008 17:11:09 +0200 Subject: x86: 4kstacks default Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index f4413c04e687..610aaecc19f8 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -106,8 +106,8 @@ config DEBUG_NX_TEST config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" - depends on DEBUG_KERNEL depends on X86_32 + default y help If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. 
This facilitates -- cgit v1.2.1 From af926a5830079bf36253dcf3a1b75b1497fc7fd1 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:40:32 +0400 Subject: x86: move x86_bios_cpu_apicid to io_apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_64.c | 3 +++ arch/x86/kernel/genapic_64.c | 2 -- arch/x86/kernel/mpparse_64.c | 2 -- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 8b0fad47a5d2..274ebabf49a2 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -91,6 +91,9 @@ unsigned long mp_lapic_addr; unsigned int boot_cpu_physical_apicid = -1U; EXPORT_SYMBOL(boot_cpu_physical_apicid); +DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); + unsigned int __cpuinitdata maxcpus = NR_CPUS; /* * Get the LAPIC version diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index c9eabe36ee36..7df38c4c8575 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -31,8 +31,6 @@ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_cpu_to_apicid_early_ptr; #endif -DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); DEFINE_PER_CPU(int, x2apic_extra_bits); struct genapic __read_mostly *genapic = &apic_flat; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 4840a846904e..378c4ba80b47 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -60,8 +60,6 @@ u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = {[0 ... NR_CPUS - 1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; #endif -DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP -- cgit v1.2.1 From 76eb41319d6ab98d17c81a8001a6d7ed9f8359ee Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:40:41 +0400 Subject: x86: move x86_cpu_to_apicid to setup.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup.c | 4 ++++ arch/x86/kernel/smpboot.c | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index dc7940955b7a..01119d9b013e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -9,6 +9,10 @@ #include #include #include +#include + +DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; +EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); #if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP) /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 22bf6c29454f..412061a0bf2b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,8 +77,6 @@ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_cpu_to_apicid_early_ptr; -DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; -EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... 
NR_CPUS-1] = BAD_APICID }; -- cgit v1.2.1 From 0fc0906e59df1427d194b78376d15ca48079f6bf Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:40:48 +0400 Subject: x86: move phys_cpu_present_map to setup.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 5 ----- arch/x86/kernel/mpparse_64.c | 5 ----- arch/x86/kernel/setup.c | 4 ++++ arch/x86/kernel/smpboot.c | 3 --- 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 7b7e008496e0..4f4cfad9ae57 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -69,11 +69,6 @@ unsigned int boot_cpu_physical_apicid = -1U; #endif #endif -/* Make it easy to share the UP and SMP code: */ -#ifndef CONFIG_X86_SMP -physid_mask_t phys_cpu_present_map; -#endif - /* * Intel MP BIOS table parsing routines: */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 378c4ba80b47..8d7365511ac0 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -70,11 +70,6 @@ unsigned int boot_cpu_physical_apicid = -1U; #endif #endif -/* Make it easy to share the UP and SMP code: */ -#ifndef CONFIG_X86_SMP -physid_mask_t phys_cpu_present_map; -#endif - /* * Intel MP BIOS table parsing routines: */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 01119d9b013e..011fcdd213ff 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -9,11 +9,15 @@ #include #include #include +#include #include DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); +/* Bitmask of physically existing CPUs */ +physid_mask_t phys_cpu_present_map; + #if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP) /* * Copy data used in early init routines from the initial arrays to the diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 412061a0bf2b..7e6aa1c790a2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -88,9 +88,6 @@ u8 apicid_2_node[MAX_APICID]; /* Internal processor count */ unsigned int num_processors; -/* Bitmask of physically existing CPUs */ -physid_mask_t phys_cpu_present_map; - /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -- cgit v1.2.1 From 708650afe98a50d0b280bea9dcf5f160b94ee9fb Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:40:54 +0400 Subject: x86: move x86_cpu_to_apicid_init to smpboot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/genapic_64.c | 6 ------ arch/x86/kernel/smpboot.c | 3 ++- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 7df38c4c8575..9546ef408b92 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c @@ -25,12 +25,6 @@ #include #endif -/* which logical CPU number maps to which CPU (physical APIC ID) */ -#ifdef CONFIG_SMP -u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata - = { [0 ... 
NR_CPUS-1] = BAD_APICID }; -void *x86_cpu_to_apicid_early_ptr; -#endif DEFINE_PER_CPU(int, x2apic_extra_bits); struct genapic __read_mostly *genapic = &apic_flat; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7e6aa1c790a2..e3ea074ba6a4 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -72,12 +72,13 @@ * integrate apic between arches, we can probably do a better job, but * right now, they'll stay here -- glommer */ -#ifdef CONFIG_X86_32 + /* which logical CPU number maps to which CPU (physical APIC ID) */ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_cpu_to_apicid_early_ptr; +#ifdef CONFIG_X86_32 u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; -- cgit v1.2.1 From 16ecf7a47cf4f1c97189a551b001195aed550cc2 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:00 +0400 Subject: x86: move x86_bios_cpu_apicid_init to smpboot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 6 ------ arch/x86/kernel/smpboot.c | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 8d7365511ac0..3196c2318640 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -55,12 +55,6 @@ int mp_irq_entries; int nr_ioapics; -#ifdef CONFIG_SMP -u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata - = {[0 ... NR_CPUS - 1] = BAD_APICID }; -void *x86_bios_cpu_apicid_early_ptr; -#endif - /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP unsigned int num_processors; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e3ea074ba6a4..abf63767cd46 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -78,11 +78,11 @@ u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = { [0 ... NR_CPUS-1] = BAD_APICID }; void *x86_cpu_to_apicid_early_ptr; -#ifdef CONFIG_X86_32 u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata = { [0 ... 
NR_CPUS-1] = BAD_APICID }; void *x86_bios_cpu_apicid_early_ptr; +#ifdef CONFIG_X86_32 u8 apicid_2_node[MAX_APICID]; #endif -- cgit v1.2.1 From 61048c6328819b0973ef662f6d46f2e2bc753ceb Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:07 +0400 Subject: x86: don't set IO APIC features if IO APIC is not enabled Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 24 +++++++++++++++++++++--- include/asm-x86/mach-default/mach_apic.h | 2 ++ include/asm-x86/mpspec.h | 3 --- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 4f4cfad9ae57..09cb77813680 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -221,6 +221,8 @@ static void __init MP_bus_info (struct mpc_config_bus *m) } } +#ifdef CONFIG_X86_IO_APIC + static int bad_ioapic(unsigned long address) { if (nr_ioapics >= MAX_IO_APICS) { @@ -263,6 +265,8 @@ static void __init MP_intsrc_info (struct mpc_config_intsrc *m) panic("Max # of irq sources exceeded!!\n"); } +#endif + static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) { Dprintk("Lint: type %d, pol %d, trig %d, bus %d," @@ -421,21 +425,25 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) } case MP_IOAPIC: { +#ifdef CONFIG_X86_IO_APIC struct mpc_config_ioapic *m= (struct mpc_config_ioapic *)mpt; MP_ioapic_info(m); mpt+=sizeof(*m); count+=sizeof(*m); +#endif break; } case MP_INTSRC: { +#ifdef CONFIG_X86_IO_APIC struct mpc_config_intsrc *m= (struct mpc_config_intsrc *)mpt; MP_intsrc_info(m); mpt+=sizeof(*m); count+=sizeof(*m); +#endif break; } case MP_LINTSRC: @@ -463,6 +471,8 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) return num_processors; } +#ifdef CONFIG_X86_IO_APIC + static int __init ELCR_trigger(unsigned int irq) { unsigned int port; @@ -537,11 +547,15 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) MP_intsrc_info(&intsrc); } +#endif + static inline void __init construct_default_ISA_mptable(int mpc_default_type) { struct mpc_config_processor processor; struct mpc_config_bus bus; +#ifdef CONFIG_X86_IO_APIC struct mpc_config_ioapic ioapic; +#endif struct mpc_config_lintsrc lintsrc; int linttypes[2] = { mp_ExtINT, mp_NMI }; int i; @@ -597,6 +611,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) MP_bus_info(&bus); } +#ifdef CONFIG_X86_IO_APIC ioapic.mpc_type = MP_IOAPIC; ioapic.mpc_apicid = 2; ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; @@ -608,7 +623,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) * We set up most of the low 16 IO-APIC pins according to MPS rules. */ construct_default_ioirq_mptable(mpc_default_type); - +#endif lintsrc.mpc_type = MP_LINTSRC; lintsrc.mpc_irqflag = 0; /* conforming */ lintsrc.mpc_srcbusid = 0; @@ -670,6 +685,8 @@ void __init get_smp_config (void) printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); return; } + +#ifdef CONFIG_X86_IO_APIC /* * If there are no explicit MP IRQ entries, then we are * broken. 
We set up most of the low 16 IO-APIC pins to @@ -687,7 +704,7 @@ void __init get_smp_config (void) construct_default_ioirq_mptable(0); } - +#endif } else BUG(); @@ -967,8 +984,9 @@ void __init mp_config_acpi_legacy_irqs (void) intsrc.mpc_type = MP_INTSRC; intsrc.mpc_irqflag = 0; /* Conforming */ intsrc.mpc_srcbus = MP_ISA_BUS; +#ifdef CONFIG_X86_IO_APIC intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; - +#endif /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 14217a970c5e..0a6634f62abe 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h @@ -69,8 +69,10 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) static inline void setup_apic_routing(void) { +#ifdef CONFIG_X86_IO_APIC printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "Flat", nr_ioapics); +#endif } static inline int apicid_to_node(int logical_apicid) diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 1f6445b147f8..08cc5e027958 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -36,9 +36,6 @@ extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; extern int smp_found_config; -extern int nr_ioapics; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; extern int mpc_default_type; extern unsigned long mp_lapic_addr; -- cgit v1.2.1 From ba1ce61ff226bddebd2101a29fe56b4664ef7cec Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 7 Apr 2008 13:11:09 +0200 Subject: x86: don't set io apic features if io-apic is not enabled, fix Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 09cb77813680..44f52f623fd6 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -429,9 +429,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) struct mpc_config_ioapic *m= (struct mpc_config_ioapic *)mpt; MP_ioapic_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); #endif + mpt+=sizeof(struct mpc_config_ioapic); + count+=sizeof(struct mpc_config_ioapic); break; } case MP_INTSRC: @@ -441,9 +441,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) (struct mpc_config_intsrc *)mpt; MP_intsrc_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); #endif + mpt+=sizeof(struct mpc_config_intsrc); + count+=sizeof(struct mpc_config_intsrc); break; } case MP_LINTSRC: -- cgit v1.2.1 From 9f640ccbc67b7c306206502bca420a80ad15c965 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:13 +0400 Subject: x86: move mp_ioapics to io_apic_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 4 ++++ arch/x86/kernel/mpparse_32.c | 5 ----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index bfebe7a1966d..2e01b69a46e0 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -71,6 +71,10 @@ int sis_apic_bug = -1; */ int nr_ioapic_registers[MAX_IO_APICS]; +/* I/O APIC entries */ +struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; +int nr_ioapics; + static int disable_timer_pin_1 __initdata; /* diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 
44f52f623fd6..302253cbfc6a 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -47,17 +47,12 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; static int mp_current_pci_id; -/* I/O APIC entries */ -struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; - /* # of MP IRQ source entries */ struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* MP IRQ source entries */ int mp_irq_entries; -int nr_ioapics; - int pic_mode; /* Make it easy to share the UP and SMP code: */ -- cgit v1.2.1 From 9c7408f3c491b6fe990cd2dacd5471ca21760551 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:19 +0400 Subject: x86: move mp_ioapics to io_apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_64.c | 4 ++++ arch/x86/kernel/mpparse_64.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 0ac92d6acf57..e9217edab434 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -103,6 +103,10 @@ DEFINE_SPINLOCK(vector_lock); */ int nr_ioapic_registers[MAX_IO_APICS]; +/* I/O APIC entries */ +struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; +int nr_ioapics; + /* * Rough estimation of how many shared IRQs there are, can * be changed anytime. diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 3196c2318640..f1015bf53cf0 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -44,8 +44,6 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; static int mp_current_pci_id = 0; -/* I/O APIC entries */ -struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; /* # of MP IRQ source entries */ struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; @@ -53,8 +51,6 @@ struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* MP IRQ source entries */ int mp_irq_entries; -int nr_ioapics; - /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP unsigned int num_processors; -- cgit v1.2.1 From 9e5c5f1dd29c86307e6b3cfa75e85d0efccc1f6b Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:26 +0400 Subject: x86: move mp_ioapic_routing to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 2 ++ arch/x86/kernel/mpparse_32.c | 7 +------ arch/x86/kernel/mpparse_64.c | 7 +------ include/asm-x86/io_apic.h | 7 +++++++ 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b33ebf6ea4f1..9cf575184536 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -320,6 +320,8 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e #ifdef CONFIG_X86_IO_APIC +struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; + static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 302253cbfc6a..b6f1e4e235e3 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -837,12 +837,7 @@ void __cpuinit mp_register_lapic (int id, u8 enabled) #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 -static struct mp_ioapic_routing { - int apic_id; - int gsi_base; - int gsi_end; - u32 pin_programmed[4]; -} mp_ioapic_routing[MAX_IO_APICS]; 
+extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; static int mp_find_ioapic (int gsi) { diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index f1015bf53cf0..813057cb2ddb 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -630,12 +630,7 @@ void __cpuinit mp_register_lapic(int id, u8 enabled) #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 -static struct mp_ioapic_routing { - int apic_id; - int gsi_base; - int gsi_end; - u32 pin_programmed[4]; -} mp_ioapic_routing[MAX_IO_APICS]; +extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; static int mp_find_ioapic(int gsi) { diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 095e8e30a345..0c9e17c73e05 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h @@ -110,6 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS]; * MP-BIOS irq configuration table structures: */ +struct mp_ioapic_routing { + int apic_id; + int gsi_base; + int gsi_end; + u32 pin_programmed[4]; +}; + /* I/O APIC entries */ extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; -- cgit v1.2.1 From 584f734d035db5c5c07f938e464ddeeefde7ec31 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:32 +0400 Subject: x86: move mp_irqs to io_apics_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 6 ++++++ arch/x86/kernel/mpparse_32.c | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 2e01b69a46e0..db1b1f30b650 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -75,6 +75,12 @@ int nr_ioapic_registers[MAX_IO_APICS]; struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; int nr_ioapics; +/* MP IRQ source entries */ +struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +/* # of MP IRQ source entries */ +int mp_irq_entries; + static int disable_timer_pin_1 __initdata; /* diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index b6f1e4e235e3..2b16e5c71a64 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -47,12 +47,6 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; static int mp_current_pci_id; -/* # of MP IRQ source entries */ -struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* MP IRQ source entries */ -int mp_irq_entries; - int pic_mode; /* Make it easy to share the UP and SMP code: */ -- cgit v1.2.1 From 350bae1d3f0d0c763c5bb9cc5fb5c363bd0086db Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:38 +0400 Subject: x86: move mp_irqs to io_apic_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_64.c | 6 ++++++ arch/x86/kernel/mpparse_64.c | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index e9217edab434..65b6840e1820 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -107,6 +107,12 @@ int nr_ioapic_registers[MAX_IO_APICS]; struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; int nr_ioapics; +/* MP IRQ source entries */ +struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; + +/* # of MP IRQ source entries */ +int mp_irq_entries; + /* * Rough estimation of how many shared IRQs there are, can * be changed anytime. 
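The two mp_irqs moves follow the same pattern as the mp_ioapics moves above: each array gets exactly one defining translation unit in the io_apic code, and every other file is left with an extern declaration in a shared header (the patches show include/asm-x86/io_apic.h carrying the mp_ioapics externs; where the mp_irqs externs end up is not shown here, so the header placement below is only illustrative). A minimal sketch of the layout these patches converge on:

    /* shared header -- declarations only */
    extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
    extern int nr_ioapics;
    extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
    extern int mp_irq_entries;

    /* io_apic_64.c (io_apic_32.c on 32-bit) -- the single definitions */
    struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
    int nr_ioapics;
    struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
    int mp_irq_entries;

mpparse_64.c and the ACPI code then include the header and use the symbols without owning them, which is what lets the mpparse files shed their duplicated definitions.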
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 813057cb2ddb..07c98dbd468a 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -45,12 +45,6 @@ int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; static int mp_current_pci_id = 0; -/* # of MP IRQ source entries */ -struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* MP IRQ source entries */ -int mp_irq_entries; - /* Make it easy to share the UP and SMP code: */ #ifndef CONFIG_X86_SMP unsigned int num_processors; -- cgit v1.2.1 From 2fe60147570231cde0d1f14711d2e34ccdf54b65 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:44 +0400 Subject: x86: move up & smp variables to setup.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic_32.c | 3 --- arch/x86/kernel/apic_64.c | 4 ---- arch/x86/kernel/mpparse_32.c | 13 ------------- arch/x86/kernel/mpparse_64.c | 9 --------- arch/x86/kernel/setup.c | 8 ++++++++ arch/x86/kernel/smpboot.c | 5 ----- include/asm-x86/smp.h | 3 ++- 7 files changed, 10 insertions(+), 35 deletions(-) diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 4905a11b30e3..687208190b06 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -52,9 +52,6 @@ unsigned long mp_lapic_addr; -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; - DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 274ebabf49a2..9e8e5c050c55 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -87,10 +87,6 @@ static unsigned long apic_phys; unsigned long mp_lapic_addr; -/* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; -EXPORT_SYMBOL(boot_cpu_physical_apicid); - DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 2b16e5c71a64..ed4b3bc0e97a 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -49,15 +49,6 @@ static int mp_current_pci_id; int pic_mode; -/* Make it easy to share the UP and SMP code: */ -#ifndef CONFIG_X86_SMP -unsigned int num_processors; -unsigned disabled_cpus __cpuinitdata; -#ifndef CONFIG_X86_LOCAL_APIC -unsigned int boot_cpu_physical_apicid = -1U; -#endif -#endif - /* * Intel MP BIOS table parsing routines: */ @@ -93,9 +84,7 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) int apicid; if (!(m->mpc_cpuflag & CPU_ENABLED)) { -#ifdef CONFIG_X86_SMP disabled_cpus++; -#endif return; } @@ -817,9 +806,7 @@ void __cpuinit mp_register_lapic (int id, u8 enabled) } if (!enabled) { -#ifdef CONFIG_X86_SMP ++disabled_cpus; -#endif return; } diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 07c98dbd468a..f860727e9151 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -45,15 +45,6 @@ int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... 
MAX_MP_BUSSES - 1] = -1 }; static int mp_current_pci_id = 0; -/* Make it easy to share the UP and SMP code: */ -#ifndef CONFIG_X86_SMP -unsigned int num_processors; -unsigned disabled_cpus __cpuinitdata; -#ifndef CONFIG_X86_LOCAL_APIC -unsigned int boot_cpu_physical_apicid = -1U; -#endif -#endif - /* * Intel MP BIOS table parsing routines: */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 011fcdd213ff..ed157c90412e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -12,6 +12,14 @@ #include #include +unsigned int num_processors; +unsigned disabled_cpus __cpuinitdata; +/* Processor that is doing the boot up */ +unsigned int boot_cpu_physical_apicid = -1U; +EXPORT_SYMBOL(boot_cpu_physical_apicid); + +physid_mask_t phys_cpu_present_map; + DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index abf63767cd46..21ad3f396a05 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -86,14 +86,9 @@ void *x86_bios_cpu_apicid_early_ptr; u8 apicid_2_node[MAX_APICID]; #endif -/* Internal processor count */ -unsigned int num_processors; - /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -unsigned disabled_cpus __cpuinitdata; - /* Store all idle threads, this can be reused instead of creating * a new thread. Also avoids complicated thread destroy functionality * for idle threads. diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index d973c11688ce..3496e1c299b2 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h @@ -128,7 +128,6 @@ int native_cpu_up(unsigned int cpunum); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern unsigned disabled_cpus; extern void prefill_possible_map(void); #define SMP_TRAMPOLINE_BASE 0x6000 @@ -144,6 +143,8 @@ static inline int num_booting_cpus(void) } #endif /* CONFIG_SMP */ +extern unsigned disabled_cpus __cpuinitdata; + #ifdef CONFIG_X86_32_SMP /* * This function is needed by all SMP systems. It must _always_ be valid -- cgit v1.2.1 From dfac2189c2e1fbb90ee83f15b5e404425754e9f4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:50 +0400 Subject: x86: move mp_register_lapic to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 21 ++++++++++++++++----- arch/x86/kernel/mpparse_32.c | 16 ---------------- arch/x86/kernel/mpparse_64.c | 10 ---------- include/asm-x86/mpspec.h | 1 - 4 files changed, 16 insertions(+), 32 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 9cf575184536..11bd11847b19 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -39,6 +39,7 @@ #include #include #include +#include #ifdef CONFIG_X86_LOCAL_APIC # include @@ -239,6 +240,16 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) return 0; } +static void __cpuinit acpi_register_lapic(int id, u8 enabled) +{ + if (!enabled) { + ++disabled_cpus; + return; + } + + generic_processor_info(id, 0); +} + static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) { @@ -258,8 +269,8 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ - mp_register_lapic(processor->id, /* APIC ID */ - processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? 
*/ + acpi_register_lapic(processor->id, /* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } @@ -276,8 +287,8 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) acpi_table_print_madt_entry(header); - mp_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ - processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */ + acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ + processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } @@ -554,7 +565,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) buffer.pointer = NULL; tmp_map = cpu_present_map; - mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); + acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); /* * If mp_register_lapic successfully generates a new logical cpu diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index ed4b3bc0e97a..ebec70a14198 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -797,22 +797,6 @@ void __init mp_register_lapic_address(u64 address) Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); } -void __cpuinit mp_register_lapic (int id, u8 enabled) -{ - if (MAX_APICS - id <= 0) { - printk(KERN_WARNING "Processor #%d invalid (max %d)\n", - id, MAX_APICS); - return; - } - - if (!enabled) { - ++disabled_cpus; - return; - } - - generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR))); -} - #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index f860727e9151..03c19a2e6e9e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -601,16 +601,6 @@ void __init mp_register_lapic_address(u64 address) if (boot_cpu_physical_apicid == -1U) boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); } -void __cpuinit mp_register_lapic(int id, u8 enabled) -{ - if (!enabled) { - ++disabled_cpus; - return; - } - - generic_processor_info(id, 0); -} - #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 08cc5e027958..c614051f4fc9 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -44,7 +44,6 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic(int id, u8 enabled); extern void mp_register_lapic_address(u64 address); extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, -- cgit v1.2.1 From 31d2092eb0c23636b73d2c24c0c11b66470cef58 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:41:57 +0400 Subject: x86: move mp_register_lapic_address to boot.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 12 +++++++++++- arch/x86/kernel/mpparse_32.c | 12 ------------ arch/x86/kernel/mpparse_64.c | 8 -------- include/asm-x86/mpspec.h | 1 - 4 files changed, 11 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 11bd11847b19..057ccf1d5ad4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -765,6 +765,16 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table) * Parse LAPIC entries in MADT * returns 0 on success, < 0 on error */ + +static void __init acpi_register_lapic_address(unsigned long address) +{ + mp_lapic_addr = address; + + 
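/* map the APIC's fixmap window, then read the boot CPU's physical APIC ID back from the hardware if nothing has set it yet: */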
set_fixmap_nocache(FIX_APIC_BASE, address); + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); +} + static int __init acpi_parse_madt_lapic_entries(void) { int count; @@ -786,7 +796,7 @@ static int __init acpi_parse_madt_lapic_entries(void) return count; } - mp_register_lapic_address(acpi_lapic_addr); + acpi_register_lapic_address(acpi_lapic_addr); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_sapic, MAX_APICS); diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index ebec70a14198..052043ed6499 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -785,18 +785,6 @@ void __init find_smp_config (void) #ifdef CONFIG_ACPI -void __init mp_register_lapic_address(u64 address) -{ - mp_lapic_addr = (unsigned long) address; - - set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); - - if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); - - Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); -} - #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 03c19a2e6e9e..1c3bf80b3ba6 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -594,14 +594,6 @@ void __init find_smp_config(void) #ifdef CONFIG_ACPI -void __init mp_register_lapic_address(u64 address) -{ - mp_lapic_addr = (unsigned long)address; - set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); - if (boot_cpu_physical_apicid == -1U) - boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); -} - #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index c614051f4fc9..57a991b9c053 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h @@ -44,7 +44,6 @@ extern void get_smp_config(void); void __cpuinit generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI -extern void mp_register_lapic_address(u64 address); extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -- cgit v1.2.1 From 4ef81297f72655c4f4c1ae9c371453f9ca796aad Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:03 +0400 Subject: x86: lindent mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 420 ++++++++++++++++++++++--------------------- 1 file changed, 214 insertions(+), 206 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 052043ed6499..f1c896ab8275 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -41,10 +41,10 @@ int smp_found_config; * MP-table. */ #if defined (CONFIG_MCA) || defined (CONFIG_EISA) -int mp_bus_id_to_type [MAX_MP_BUSSES]; +int mp_bus_id_to_type[MAX_MP_BUSSES]; #endif DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; +int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; static int mp_current_pci_id; int pic_mode; @@ -53,7 +53,6 @@ int pic_mode; * Intel MP BIOS table parsing routines: */ - /* * Checksum an MP configuration block. */ @@ -75,8 +74,9 @@ static int __init mpf_checksum(unsigned char *mp, int len) * doing this .... 
*/ -static int mpc_record; -static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata; +static int mpc_record; +static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] + __cpuinitdata; #endif static void __cpuinit MP_processor_info(struct mpc_config_processor *m) @@ -87,66 +87,63 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) disabled_cpus++; return; } - #ifdef CONFIG_X86_NUMAQ apicid = mpc_apic_id(m, translation_table[mpc_record]); #else Dprintk("Processor #%d %u:%u APIC version %d\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); + (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); apicid = m->mpc_apicid; #endif - if (m->mpc_featureflag&(1<<0)) + if (m->mpc_featureflag & (1 << 0)) Dprintk(" Floating point unit present.\n"); - if (m->mpc_featureflag&(1<<7)) + if (m->mpc_featureflag & (1 << 7)) Dprintk(" Machine Exception supported.\n"); - if (m->mpc_featureflag&(1<<8)) + if (m->mpc_featureflag & (1 << 8)) Dprintk(" 64 bit compare & exchange supported.\n"); - if (m->mpc_featureflag&(1<<9)) + if (m->mpc_featureflag & (1 << 9)) Dprintk(" Internal APIC present.\n"); - if (m->mpc_featureflag&(1<<11)) + if (m->mpc_featureflag & (1 << 11)) Dprintk(" SEP present.\n"); - if (m->mpc_featureflag&(1<<12)) + if (m->mpc_featureflag & (1 << 12)) Dprintk(" MTRR present.\n"); - if (m->mpc_featureflag&(1<<13)) + if (m->mpc_featureflag & (1 << 13)) Dprintk(" PGE present.\n"); - if (m->mpc_featureflag&(1<<14)) + if (m->mpc_featureflag & (1 << 14)) Dprintk(" MCA present.\n"); - if (m->mpc_featureflag&(1<<15)) + if (m->mpc_featureflag & (1 << 15)) Dprintk(" CMOV present.\n"); - if (m->mpc_featureflag&(1<<16)) + if (m->mpc_featureflag & (1 << 16)) Dprintk(" PAT present.\n"); - if (m->mpc_featureflag&(1<<17)) + if (m->mpc_featureflag & (1 << 17)) Dprintk(" PSE present.\n"); - if (m->mpc_featureflag&(1<<18)) + if (m->mpc_featureflag & (1 << 18)) Dprintk(" PSN present.\n"); - if (m->mpc_featureflag&(1<<19)) + if (m->mpc_featureflag & (1 << 19)) Dprintk(" Cache Line Flush Instruction present.\n"); /* 20 Reserved */ - if (m->mpc_featureflag&(1<<21)) + if (m->mpc_featureflag & (1 << 21)) Dprintk(" Debug Trace and EMON Store present.\n"); - if (m->mpc_featureflag&(1<<22)) + if (m->mpc_featureflag & (1 << 22)) Dprintk(" ACPI Thermal Throttle Registers present.\n"); - if (m->mpc_featureflag&(1<<23)) + if (m->mpc_featureflag & (1 << 23)) Dprintk(" MMX present.\n"); - if (m->mpc_featureflag&(1<<24)) + if (m->mpc_featureflag & (1 << 24)) Dprintk(" FXSR present.\n"); - if (m->mpc_featureflag&(1<<25)) + if (m->mpc_featureflag & (1 << 25)) Dprintk(" XMM present.\n"); - if (m->mpc_featureflag&(1<<26)) + if (m->mpc_featureflag & (1 << 26)) Dprintk(" Willamette New Instructions present.\n"); - if (m->mpc_featureflag&(1<<27)) + if (m->mpc_featureflag & (1 << 27)) Dprintk(" Self Snoop present.\n"); - if (m->mpc_featureflag&(1<<28)) + if (m->mpc_featureflag & (1 << 28)) Dprintk(" HT present.\n"); - if (m->mpc_featureflag&(1<<29)) + if (m->mpc_featureflag & (1 << 29)) Dprintk(" Thermal Monitor present.\n"); /* 30, 31 Reserved */ - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { Dprintk(" Bootup CPU\n"); boot_cpu_physical_apicid = m->mpc_apicid; @@ -155,7 +152,7 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) generic_processor_info(apicid, m->mpc_apicver); } -static void __init MP_bus_info (struct mpc_config_bus *m) +static void __init MP_bus_info(struct 
mpc_config_bus *m) { char str[7]; @@ -171,14 +168,14 @@ static void __init MP_bus_info (struct mpc_config_bus *m) #if MAX_MP_BUSSES < 256 if (m->mpc_busid >= MAX_MP_BUSSES) { printk(KERN_WARNING "MP table busid value (%d) for bustype %s " - " is too large, max. supported is %d\n", - m->mpc_busid, str, MAX_MP_BUSSES - 1); + " is too large, max. supported is %d\n", + m->mpc_busid, str, MAX_MP_BUSSES - 1); return; } #endif set_bit(m->mpc_busid, mp_bus_not_pci); - if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { + if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { #ifdef CONFIG_X86_NUMAQ mpc_oem_pci_bus(m, translation_table[mpc_record]); #endif @@ -187,11 +184,11 @@ static void __init MP_bus_info (struct mpc_config_bus *m) mp_current_pci_id++; #if defined(CONFIG_EISA) || defined (CONFIG_MCA) mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; - } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { + } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; - } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { + } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; - } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { + } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; } else { printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); @@ -216,13 +213,13 @@ static int bad_ioapic(unsigned long address) return 0; } -static void __init MP_ioapic_info (struct mpc_config_ioapic *m) +static void __init MP_ioapic_info(struct mpc_config_ioapic *m) { if (!(m->mpc_flags & MPC_APIC_USABLE)) return; printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", - m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); + m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); if (bad_ioapic(m->mpc_apicaddr)) return; @@ -231,38 +228,41 @@ static void __init MP_ioapic_info (struct mpc_config_ioapic *m) nr_ioapics++; } -static void __init MP_intsrc_info (struct mpc_config_intsrc *m) +static void __init MP_intsrc_info(struct mpc_config_intsrc *m) { - mp_irqs [mp_irq_entries] = *m; + mp_irqs[mp_irq_entries] = *m; Dprintk("Int: type %d, pol %d, trig %d, bus %d," " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, + m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); if (++mp_irq_entries == MAX_IRQ_SOURCES) panic("Max # of irq sources exceeded!!\n"); } #endif -static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) +static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) { Dprintk("Lint: type %d, pol %d, trig %d, bus %d," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, + m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); } #ifdef CONFIG_X86_NUMAQ -static void __init MP_translation_info (struct mpc_config_translation *m) +static void __init MP_translation_info(struct mpc_config_translation *m) { - printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, 
m->trans_local); + printk(KERN_INFO + "Translation: record %d, type %d, quad %d, global %d, local %d\n", + mpc_record, m->trans_type, m->trans_quad, m->trans_global, + m->trans_local); - if (mpc_record >= MAX_MPC_ENTRY) + if (mpc_record >= MAX_MPC_ENTRY) printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); else - translation_table[mpc_record] = m; /* stash this for later */ + translation_table[mpc_record] = m; /* stash this for later */ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) node_set_online(m->trans_quad); } @@ -271,59 +271,59 @@ static void __init MP_translation_info (struct mpc_config_translation *m) * Read/parse the MPC oem tables */ -static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ - unsigned short oemsize) +static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, + unsigned short oemsize) { - int count = sizeof (*oemtable); /* the header size */ - unsigned char *oemptr = ((unsigned char *)oemtable)+count; - + int count = sizeof(*oemtable); /* the header size */ + unsigned char *oemptr = ((unsigned char *)oemtable) + count; + mpc_record = 0; - printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); - if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) - { - printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", - oemtable->oem_signature[0], - oemtable->oem_signature[1], - oemtable->oem_signature[2], - oemtable->oem_signature[3]); + printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", + oemtable); + if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { + printk(KERN_WARNING + "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", + oemtable->oem_signature[0], oemtable->oem_signature[1], + oemtable->oem_signature[2], oemtable->oem_signature[3]); return; } - if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) - { + if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); return; } while (count < oemtable->oem_length) { switch (*oemptr) { - case MP_TRANSLATION: + case MP_TRANSLATION: { - struct mpc_config_translation *m= - (struct mpc_config_translation *)oemptr; + struct mpc_config_translation *m = + (struct mpc_config_translation *)oemptr; MP_translation_info(m); oemptr += sizeof(*m); count += sizeof(*m); ++mpc_record; break; } - default: + default: { - printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr); + printk(KERN_WARNING + "Unrecognised OEM table entry type! - %d\n", + (int)*oemptr); return; } } - } + } } static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) + char *productid) { if (strncmp(oem, "IBM NUMA", 8)) printk("Warning! 
May not be a NUMA-Q system!\n"); if (mpc->mpc_oemptr) - smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, - mpc->mpc_oemsize); + smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr, + mpc->mpc_oemsize); } -#endif /* CONFIG_X86_NUMAQ */ +#endif /* CONFIG_X86_NUMAQ */ /* * Read/parse the MPC @@ -333,34 +333,34 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) { char str[16]; char oem[10]; - int count=sizeof(*mpc); - unsigned char *mpt=((unsigned char *)mpc)+count; + int count = sizeof(*mpc); + unsigned char *mpt = ((unsigned char *)mpc) + count; - if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { + if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", - *(u32 *)mpc->mpc_signature); + *(u32 *) mpc->mpc_signature); return 0; } - if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { + if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { printk(KERN_ERR "SMP mptable: checksum error!\n"); return 0; } - if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { + if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", - mpc->mpc_spec); + mpc->mpc_spec); return 0; } if (!mpc->mpc_lapic) { printk(KERN_ERR "SMP mptable: null local APIC address!\n"); return 0; } - memcpy(oem,mpc->mpc_oem,8); - oem[8]=0; - printk(KERN_INFO "OEM ID: %s ",oem); + memcpy(oem, mpc->mpc_oem, 8); + oem[8] = 0; + printk(KERN_INFO "OEM ID: %s ", oem); - memcpy(str,mpc->mpc_productid,12); - str[12]=0; - printk("Product ID: %s ",str); + memcpy(str, mpc->mpc_productid, 12); + str[12] = 0; + printk("Product ID: %s ", str); mps_oem_check(mpc, oem, str); @@ -374,17 +374,17 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) mp_lapic_addr = mpc->mpc_lapic; /* - * Now process the configuration blocks. + * Now process the configuration blocks. 
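 * Each block starts with a one-byte type code; the switch below dispatches on it and steps mpt forward by the size of that entry type.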
*/ #ifdef CONFIG_X86_NUMAQ mpc_record = 0; #endif while (count < mpc->mpc_length) { - switch(*mpt) { - case MP_PROCESSOR: + switch (*mpt) { + case MP_PROCESSOR: { - struct mpc_config_processor *m= - (struct mpc_config_processor *)mpt; + struct mpc_config_processor *m = + (struct mpc_config_processor *)mpt; /* ACPI may have already provided this data */ if (!acpi_lapic) MP_processor_info(m); @@ -392,48 +392,48 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) count += sizeof(*m); break; } - case MP_BUS: + case MP_BUS: { - struct mpc_config_bus *m= - (struct mpc_config_bus *)mpt; + struct mpc_config_bus *m = + (struct mpc_config_bus *)mpt; MP_bus_info(m); mpt += sizeof(*m); count += sizeof(*m); break; } - case MP_IOAPIC: + case MP_IOAPIC: { #ifdef CONFIG_X86_IO_APIC - struct mpc_config_ioapic *m= - (struct mpc_config_ioapic *)mpt; + struct mpc_config_ioapic *m = + (struct mpc_config_ioapic *)mpt; MP_ioapic_info(m); #endif - mpt+=sizeof(struct mpc_config_ioapic); - count+=sizeof(struct mpc_config_ioapic); + mpt += sizeof(struct mpc_config_ioapic); + count += sizeof(struct mpc_config_ioapic); break; } - case MP_INTSRC: + case MP_INTSRC: { #ifdef CONFIG_X86_IO_APIC - struct mpc_config_intsrc *m= - (struct mpc_config_intsrc *)mpt; + struct mpc_config_intsrc *m = + (struct mpc_config_intsrc *)mpt; MP_intsrc_info(m); #endif - mpt+=sizeof(struct mpc_config_intsrc); - count+=sizeof(struct mpc_config_intsrc); + mpt += sizeof(struct mpc_config_intsrc); + count += sizeof(struct mpc_config_intsrc); break; } - case MP_LINTSRC: + case MP_LINTSRC: { - struct mpc_config_lintsrc *m= - (struct mpc_config_lintsrc *)mpt; + struct mpc_config_lintsrc *m = + (struct mpc_config_lintsrc *)mpt; MP_lintsrc_info(m); - mpt+=sizeof(*m); - count+=sizeof(*m); + mpt += sizeof(*m); + count += sizeof(*m); break; } - default: + default: { count = mpc->mpc_length; break; @@ -466,7 +466,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) int ELCR_fallback = 0; intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ + intsrc.mpc_irqflag = 0; /* conforming */ intsrc.mpc_srcbus = 0; intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; @@ -481,12 +481,16 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) * If it does, we assume it's valid. */ if (mpc_default_type == 5) { - printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); + printk(KERN_INFO + "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) - printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); + if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) + || ELCR_trigger(13)) + printk(KERN_WARNING + "ELCR contains invalid data... not using ELCR\n"); else { - printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); + printk(KERN_INFO + "Using ELCR to identify PCI interrupts\n"); ELCR_fallback = 1; } } @@ -515,13 +519,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) } intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ + intsrc.mpc_dstirq = i ? 
i : 2; /* IRQ0 to INTIN2 */ MP_intsrc_info(&intsrc); } intsrc.mpc_irqtype = mp_ExtINT; intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ + intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ MP_intsrc_info(&intsrc); } @@ -551,8 +555,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; processor.mpc_cpuflag = CPU_ENABLED; processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | - boot_cpu_data.x86_mask; + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; processor.mpc_reserved[0] = 0; processor.mpc_reserved[1] = 0; @@ -564,23 +567,23 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) bus.mpc_type = MP_BUS; bus.mpc_busid = 0; switch (mpc_default_type) { - default: - printk("???\n"); - printk(KERN_ERR "Unknown standard configuration %d\n", - mpc_default_type); - /* fall through */ - case 1: - case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); - break; - case 2: - case 6: - case 3: - memcpy(bus.mpc_bustype, "EISA ", 6); - break; - case 4: - case 7: - memcpy(bus.mpc_bustype, "MCA ", 6); + default: + printk("???\n"); + printk(KERN_ERR "Unknown standard configuration %d\n", + mpc_default_type); + /* fall through */ + case 1: + case 5: + memcpy(bus.mpc_bustype, "ISA ", 6); + break; + case 2: + case 6: + case 3: + memcpy(bus.mpc_bustype, "EISA ", 6); + break; + case 4: + case 7: + memcpy(bus.mpc_bustype, "MCA ", 6); } MP_bus_info(&bus); if (mpc_default_type > 4) { @@ -603,7 +606,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) construct_default_ioirq_mptable(mpc_default_type); #endif lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ + lintsrc.mpc_irqflag = 0; /* conforming */ lintsrc.mpc_srcbusid = 0; lintsrc.mpc_srcbusirq = 0; lintsrc.mpc_destapic = MP_APIC_ALL; @@ -619,23 +622,25 @@ static struct intel_mp_floating *mpf_found; /* * Scan the memory blocks for an SMP configuration block. */ -void __init get_smp_config (void) +void __init get_smp_config(void) { struct intel_mp_floating *mpf = mpf_found; /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical + * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. 
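 * (When both ACPI LAPIC and IOAPIC data are present, the code below therefore prefers ACPI and returns without reading the MP table.)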
*/ if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); + printk(KERN_INFO + "Using ACPI (MADT) for SMP configuration information\n"); return; - } - else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); + } else if (acpi_lapic) + printk(KERN_INFO + "Using ACPI for processor (LAPIC) configuration information\n"); - printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); - if (mpf->mpf_feature2 & (1<<7)) { + printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", + mpf->mpf_specification); + if (mpf->mpf_feature2 & (1 << 7)) { printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); pic_mode = 1; } else { @@ -648,7 +653,8 @@ void __init get_smp_config (void) */ if (mpf->mpf_feature1 != 0) { - printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); + printk(KERN_INFO "Default MP configuration #%d\n", + mpf->mpf_feature1); construct_default_ISA_mptable(mpf->mpf_feature1); } else if (mpf->mpf_physptr) { @@ -659,8 +665,10 @@ void __init get_smp_config (void) */ if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { smp_found_config = 0; - printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); + printk(KERN_ERR + "BIOS bug, MP table errors detected!...\n"); + printk(KERN_ERR + "... disabling SMP support. (tell your hw vendor)\n"); return; } @@ -673,7 +681,8 @@ void __init get_smp_config (void) if (!mp_irq_entries) { struct mpc_config_bus bus; - printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); + printk(KERN_ERR + "BIOS bug, no explicit IRQ entries, using default mptable. 
(tell your hw vendor)\n"); bus.mpc_type = MP_BUS; bus.mpc_busid = 0; @@ -692,26 +701,26 @@ void __init get_smp_config (void) */ } -static int __init smp_scan_config (unsigned long base, unsigned long length) +static int __init smp_scan_config(unsigned long base, unsigned long length) { unsigned long *bp = phys_to_virt(base); struct intel_mp_floating *mpf; - printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length); + printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp, length); if (sizeof(*mpf) != 16) printk("Error: MPF size\n"); while (length > 0) { mpf = (struct intel_mp_floating *)bp; if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && - !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4)) ) { + (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, 16) && + ((mpf->mpf_specification == 1) + || (mpf->mpf_specification == 4))) { smp_found_config = 1; printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", - mpf, virt_to_phys(mpf)); + mpf, virt_to_phys(mpf)); reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, BOOTMEM_DEFAULT); if (mpf->mpf_physptr) { @@ -741,7 +750,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) return 0; } -void __init find_smp_config (void) +void __init find_smp_config(void) { unsigned int address; @@ -753,9 +762,9 @@ void __init find_smp_config (void) * 2) Scan the top 1K of base RAM * 3) Scan the 64K of bios */ - if (smp_scan_config(0x0,0x400) || - smp_scan_config(639*0x400,0x400) || - smp_scan_config(0xF0000,0x10000)) + if (smp_scan_config(0x0, 0x400) || + smp_scan_config(639 * 0x400, 0x400) || + smp_scan_config(0xF0000, 0x10000)) return; /* * If it is an SMP machine we should know now, unless the @@ -792,14 +801,14 @@ void __init find_smp_config (void) extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; -static int mp_find_ioapic (int gsi) +static int mp_find_ioapic(int gsi) { int i = 0; /* Find the IOAPIC that manages this GSI. */ for (i = 0; i < nr_ioapics; i++) { if ((gsi >= mp_ioapic_routing[i].gsi_base) - && (gsi <= mp_ioapic_routing[i].gsi_end)) + && (gsi <= mp_ioapic_routing[i].gsi_end)) return i; } @@ -833,34 +842,32 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); - - /* + + /* * Build basic GSI lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI GSIs). */ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; mp_ioapic_routing[idx].gsi_base = gsi_base; mp_ioapic_routing[idx].gsi_end = gsi_base + - io_apic_get_redir_entries(idx); + io_apic_get_redir_entries(idx); printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_base, - mp_ioapic_routing[idx].gsi_end); + mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); nr_ioapics++; } -void __init -mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) +void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { struct mpc_config_intsrc intsrc; - int ioapic = -1; - int pin = -1; + int ioapic = -1; + int pin = -1; - /* + /* * Convert 'gsi' to 'ioapic.pin'. 
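 * (The pin is the GSI's offset within that IOAPIC's range; e.g. with a gsi_base of 16, GSI 19 ends up as pin 3.)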
*/ ioapic = mp_find_ioapic(gsi); @@ -870,7 +877,7 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) /* * TBD: This check is for faulty timer entries, where the override - * erroneously sets the trigger to level, resulting in a HUGE + * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! */ if ((bus_irq == 0) && (trigger == 3)) @@ -880,13 +887,13 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) intsrc.mpc_irqtype = mp_INT; intsrc.mpc_irqflag = (trigger << 2) | polarity; intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ - intsrc.mpc_dstirq = pin; /* INTIN# */ + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", - intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); mp_irqs[mp_irq_entries] = intsrc; @@ -896,14 +903,14 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) int es7000_plat; -void __init mp_config_acpi_legacy_irqs (void) +void __init mp_config_acpi_legacy_irqs(void) { struct mpc_config_intsrc intsrc; int i = 0; int ioapic = -1; #if defined (CONFIG_MCA) || defined (CONFIG_EISA) - /* + /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; @@ -917,20 +924,20 @@ void __init mp_config_acpi_legacy_irqs (void) if (es7000_plat == 1) return; - /* - * Locate the IOAPIC that manages the ISA IRQs (0-15). + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). */ ioapic = mp_find_ioapic(0); if (ioapic < 0) return; intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_irqflag = 0; /* Conforming */ intsrc.mpc_srcbus = MP_ISA_BUS; #ifdef CONFIG_X86_IO_APIC intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; #endif - /* + /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. */ @@ -941,28 +948,29 @@ void __init mp_config_acpi_legacy_irqs (void) struct mpc_config_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ - if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) + if (irq->mpc_srcbus == MP_ISA_BUS + && irq->mpc_srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && - (irq->mpc_dstirq == i)) + (irq->mpc_dstirq == i)) break; } if (idx != mp_irq_entries) { printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); - continue; /* IRQ already used */ + continue; /* IRQ already used */ } intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_srcbusirq = i; /* Identity mapped */ + intsrc.mpc_srcbusirq = i; /* Identity mapped */ intsrc.mpc_dstirq = i; Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " - "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, + "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); mp_irqs[mp_irq_entries] = intsrc; @@ -985,7 +993,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) * represent all possible interrupts, and IRQs * assigned to actual devices. */ - static int gsi_to_irq[MAX_GSI_NUM]; + static int gsi_to_irq[MAX_GSI_NUM]; /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) @@ -1002,8 +1010,8 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) if (ioapic_renumber_irq) gsi = ioapic_renumber_irq(ioapic, gsi); - /* - * Avoid pin reprogramming. PRTs typically include entries + /* + * Avoid pin reprogramming. PRTs typically include entries * with redundant pin->gsi mappings (but unique PCI devices); * we only program the IOAPIC on the first. */ @@ -1011,23 +1019,23 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); if (idx > 3) { printk(KERN_ERR "Invalid reference to IOAPIC pin " - "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, - ioapic_pin); + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, + ioapic_pin); return gsi; } - if ((1<= 64, use IRQ compression */ if ((gsi >= IRQ_COMPRESSION_START) - && (triggering == ACPI_LEVEL_SENSITIVE)) { + && (triggering == ACPI_LEVEL_SENSITIVE)) { /* * For PCI devices assign IRQs in order, avoiding gaps * due to unused I/O APIC pins. @@ -1058,8 +1066,8 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) } io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, - triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, - polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, + polarity == ACPI_ACTIVE_HIGH ? 
0 : 1); return gsi; } -- cgit v1.2.1 From 888032cd23f0244fdefdcbe265952e7831a9cacc Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:09 +0400 Subject: x86: add early flags to mpparse_32.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 57 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 47 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index f1c896ab8275..6c9c29621900 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -329,7 +329,7 @@ static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, * Read/parse the MPC */ -static int __init smp_read_mpc(struct mp_config_table *mpc) +static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) { char str[16]; char oem[10]; @@ -373,6 +373,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; + if (early) + return 1; + /* * Now process the configuration blocks. */ @@ -622,10 +625,13 @@ static struct intel_mp_floating *mpf_found; /* * Scan the memory blocks for an SMP configuration block. */ -void __init get_smp_config(void) +static void __init __get_smp_config(unsigned early) { struct intel_mp_floating *mpf = mpf_found; + if (acpi_lapic && early) + return; + /* * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. @@ -652,6 +658,13 @@ void __init get_smp_config(void) * Now see if we need to read further. */ if (mpf->mpf_feature1 != 0) { + if (early) { + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + return; + } printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); @@ -663,7 +676,7 @@ void __init get_smp_config(void) * Read the physical hardware table. Anything here will * override the defaults. */ - if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); @@ -672,6 +685,8 @@ void __init get_smp_config(void) return; } + if (early) + return; #ifdef CONFIG_X86_IO_APIC /* * If there are no explicit MP IRQ entries, then we are @@ -695,13 +710,25 @@ void __init get_smp_config(void) } else BUG(); - printk(KERN_INFO "Processors: %d\n", num_processors); + if (!early) + printk(KERN_INFO "Processors: %d\n", num_processors); /* * Only use the first configuration found. 
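 * (Both get_smp_config() and the new early_get_smp_config() defined below are thin wrappers that pass the early flag into this function.)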
*/ } -static int __init smp_scan_config(unsigned long base, unsigned long length) +void __init early_get_smp_config(void) +{ + __get_smp_config(1); +} + +void __init get_smp_config(void) +{ + __get_smp_config(0); +} + +static int __init smp_scan_config(unsigned long base, unsigned long length, + unsigned reserve) { unsigned long *bp = phys_to_virt(base); struct intel_mp_floating *mpf; @@ -750,7 +777,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) return 0; } -void __init find_smp_config(void) +static void __init __find_smp_config(unsigned reserve) { unsigned int address; @@ -762,9 +789,9 @@ void __init find_smp_config(void) * 2) Scan the top 1K of base RAM * 3) Scan the 64K of bios */ - if (smp_scan_config(0x0, 0x400) || - smp_scan_config(639 * 0x400, 0x400) || - smp_scan_config(0xF0000, 0x10000)) + if (smp_scan_config(0x0, 0x400, reserve) || + smp_scan_config(639 * 0x400, 0x400, reserve) || + smp_scan_config(0xF0000, 0x10000, reserve)) return; /* * If it is an SMP machine we should know now, unless the @@ -785,7 +812,17 @@ void __init find_smp_config(void) address = get_bios_ebda(); if (address) - smp_scan_config(address, 0x400); + smp_scan_config(address, 0x400, reserve); +} + +void __init early_find_smp_config(void) +{ + __find_smp_config(0); +} + +void __init find_smp_config(void) +{ + __find_smp_config(1); } /* -------------------------------------------------------------------------- -- cgit v1.2.1 From 746f2244065ddfbe0c5d339e309db4d2b48f185b Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:15 +0400 Subject: x86: unify arch/x86/kernel/mpparse_64.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 56 +++----------------------------------------- arch/x86/kernel/mpparse_64.c | 8 ++++++- 2 files changed, 10 insertions(+), 54 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 6c9c29621900..9a9819b2c5f5 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -82,6 +82,7 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { int apicid; + char *bootup_cpu = ""; if (!(m->mpc_cpuflag & CPU_ENABLED)) { disabled_cpus++; @@ -90,65 +91,14 @@ static void __cpuinit MP_processor_info(struct mpc_config_processor *m) #ifdef CONFIG_X86_NUMAQ apicid = mpc_apic_id(m, translation_table[mpc_record]); #else - Dprintk("Processor #%d %u:%u APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); apicid = m->mpc_apicid; #endif - - if (m->mpc_featureflag & (1 << 0)) - Dprintk(" Floating point unit present.\n"); - if (m->mpc_featureflag & (1 << 7)) - Dprintk(" Machine Exception supported.\n"); - if (m->mpc_featureflag & (1 << 8)) - Dprintk(" 64 bit compare & exchange supported.\n"); - if (m->mpc_featureflag & (1 << 9)) - Dprintk(" Internal APIC present.\n"); - if (m->mpc_featureflag & (1 << 11)) - Dprintk(" SEP present.\n"); - if (m->mpc_featureflag & (1 << 12)) - Dprintk(" MTRR present.\n"); - if (m->mpc_featureflag & (1 << 13)) - Dprintk(" PGE present.\n"); - if (m->mpc_featureflag & (1 << 14)) - Dprintk(" MCA present.\n"); - if (m->mpc_featureflag & (1 << 15)) - Dprintk(" CMOV present.\n"); - if (m->mpc_featureflag & (1 << 16)) - Dprintk(" PAT present.\n"); - if (m->mpc_featureflag & (1 << 17)) - Dprintk(" PSE present.\n"); - if (m->mpc_featureflag & (1 << 
18)) - Dprintk(" PSN present.\n"); - if (m->mpc_featureflag & (1 << 19)) - Dprintk(" Cache Line Flush Instruction present.\n"); - /* 20 Reserved */ - if (m->mpc_featureflag & (1 << 21)) - Dprintk(" Debug Trace and EMON Store present.\n"); - if (m->mpc_featureflag & (1 << 22)) - Dprintk(" ACPI Thermal Throttle Registers present.\n"); - if (m->mpc_featureflag & (1 << 23)) - Dprintk(" MMX present.\n"); - if (m->mpc_featureflag & (1 << 24)) - Dprintk(" FXSR present.\n"); - if (m->mpc_featureflag & (1 << 25)) - Dprintk(" XMM present.\n"); - if (m->mpc_featureflag & (1 << 26)) - Dprintk(" Willamette New Instructions present.\n"); - if (m->mpc_featureflag & (1 << 27)) - Dprintk(" Self Snoop present.\n"); - if (m->mpc_featureflag & (1 << 28)) - Dprintk(" HT present.\n"); - if (m->mpc_featureflag & (1 << 29)) - Dprintk(" Thermal Monitor present.\n"); - /* 30, 31 Reserved */ - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - Dprintk(" Bootup CPU\n"); + bootup_cpu = " (Bootup-CPU)"; boot_cpu_physical_apicid = m->mpc_apicid; } + printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); generic_processor_info(apicid, m->mpc_apicver); } diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 1c3bf80b3ba6..5c916383bb71 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -65,19 +65,25 @@ static int __init mpf_checksum(unsigned char *mp, int len) static void __cpuinit MP_processor_info(struct mpc_config_processor *m) { + int apicid; char *bootup_cpu = ""; if (!(m->mpc_cpuflag & CPU_ENABLED)) { disabled_cpus++; return; } +#ifdef CONFIG_X86_NUMAQ + apicid = mpc_apic_id(m, translation_table[mpc_record]); +#else + apicid = m->mpc_apicid; +#endif if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { bootup_cpu = " (Bootup-CPU)"; boot_cpu_physical_apicid = m->mpc_apicid; } printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); - generic_processor_info(m->mpc_apicid, 0); + generic_processor_info(apicid, m->mpc_apicver); } static void __init MP_bus_info(struct mpc_config_bus *m) -- cgit v1.2.1 From f8924e770e048429ae13bfabe1ddad9bf1e64df7 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:21 +0400 Subject: x86: unify mp_bus_info Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 15 ++++++++------- arch/x86/kernel/mpparse_64.c | 38 ++++++++++++++++++++++++++++++++------ 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 9a9819b2c5f5..9120573e2616 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -124,8 +124,12 @@ static void __init MP_bus_info(struct mpc_config_bus *m) } #endif - set_bit(m->mpc_busid, mp_bus_not_pci); - if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { + if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { + set_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; +#endif + } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { #ifdef CONFIG_X86_NUMAQ mpc_oem_pci_bus(m, translation_table[mpc_record]); #endif @@ -134,16 +138,13 @@ static void __init MP_bus_info(struct mpc_config_bus *m) mp_current_pci_id++; #if defined(CONFIG_EISA) || defined (CONFIG_MCA) mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; - } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; } else if (strncmp(str, 
BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; - } else { - printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); #endif - } + } else + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); } #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 5c916383bb71..831097f2022a 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -92,17 +92,43 @@ static void __init MP_bus_info(struct mpc_config_bus *m) memcpy(str, m->mpc_bustype, 6); str[6] = 0; + +#ifdef CONFIG_X86_NUMAQ + mpc_oem_bus_info(m, str, translation_table[mpc_record]); +#else Dprintk("Bus #%d is %s\n", m->mpc_busid, str); +#endif + +#if MAX_MP_BUSSES < 256 + if (m->mpc_busid >= MAX_MP_BUSSES) { + printk(KERN_WARNING "MP table busid value (%d) for bustype %s " + " is too large, max. supported is %d\n", + m->mpc_busid, str, MAX_MP_BUSSES - 1); + return; + } +#endif - if (strncmp(str, "ISA", 3) == 0) { - set_bit(m->mpc_busid, mp_bus_not_pci); - } else if (strncmp(str, "PCI", 3) == 0) { + if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { + set_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; +#endif + } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { +#ifdef CONFIG_X86_NUMAQ + mpc_oem_pci_bus(m, translation_table[mpc_record]); +#endif clear_bit(m->mpc_busid, mp_bus_not_pci); mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_current_pci_id++; - } else { - printk(KERN_ERR "Unknown bustype %s\n", str); - } +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; + } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; + } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; +#endif + } else + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); } static int bad_ioapic(unsigned long address) -- cgit v1.2.1 From e950bea8bff23c14eb38dc706aadf197ed81abf4 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:27 +0400 Subject: x86: unify smp_read_mpc Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 25 +++++++++++++------------ arch/x86/kernel/mpparse_64.c | 27 ++++++++++++++++++++++----- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 9120573e2616..c185065c3ebc 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -288,39 +288,40 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) unsigned char *mpt = ((unsigned char *)mpc) + count; if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { - printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", - *(u32 *) mpc->mpc_signature); + printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", + mpc->mpc_signature[0], mpc->mpc_signature[1], + mpc->mpc_signature[2], mpc->mpc_signature[3]); return 0; } if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { - printk(KERN_ERR "SMP mptable: checksum error!\n"); + printk(KERN_ERR "MPTABLE: checksum error!\n"); return 0; } if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { - printk(KERN_ERR "SMP mptable: bad 
table version (%d)!!\n", + printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", mpc->mpc_spec); return 0; } if (!mpc->mpc_lapic) { - printk(KERN_ERR "SMP mptable: null local APIC address!\n"); + printk(KERN_ERR "MPTABLE: null local APIC address!\n"); return 0; } memcpy(oem, mpc->mpc_oem, 8); oem[8] = 0; - printk(KERN_INFO "OEM ID: %s ", oem); + printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); memcpy(str, mpc->mpc_productid, 12); str[12] = 0; printk("Product ID: %s ", str); +#ifdef CONFIG_X86_32 mps_oem_check(mpc, oem, str); +#endif + printk(KERN_INFO "MPTABLE: Product ID: %s ", str); - printk("APIC at: 0x%X\n", mpc->mpc_lapic); + printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); - /* - * Save the local APIC address (it might be non-default) -- but only - * if we're not using ACPI. - */ + /* save the local APIC address, it might be non-default */ if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; @@ -399,7 +400,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) } setup_apic_routing(); if (!num_processors) - printk(KERN_ERR "SMP mptable: no processors registered!\n"); + printk(KERN_ERR "MPTABLE: no processors registered!\n"); return num_processors; } diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 831097f2022a..d188848a893e 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -188,13 +188,13 @@ static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) { char str[16]; + char oem[10]; int count = sizeof(*mpc); unsigned char *mpt = ((unsigned char *)mpc) + count; if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", - mpc->mpc_signature[0], - mpc->mpc_signature[1], + mpc->mpc_signature[0], mpc->mpc_signature[1], mpc->mpc_signature[2], mpc->mpc_signature[3]); return 0; } @@ -211,12 +211,17 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) printk(KERN_ERR "MPTABLE: null local APIC address!\n"); return 0; } - memcpy(str, mpc->mpc_oem, 8); - str[8] = 0; - printk(KERN_INFO "MPTABLE: OEM ID: %s ", str); + memcpy(oem, mpc->mpc_oem, 8); + oem[8] = 0; + printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); memcpy(str, mpc->mpc_productid, 12); str[12] = 0; + printk("Product ID: %s ", str); + +#ifdef CONFIG_X86_32 + mps_oem_check(mpc, oem, str); +#endif printk(KERN_INFO "MPTABLE: Product ID: %s ", str); printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); @@ -231,12 +236,16 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) /* * Now process the configuration blocks. 
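
/*
 * A side note on the string handling above: MP table text fields are
 * fixed-width and not NUL-terminated, hence the memcpy-and-terminate
 * pattern.  A minimal sketch (widths taken from the fields used here):
 */
char oem[8 + 1], prod[12 + 1];

memcpy(oem, mpc->mpc_oem, 8);
oem[8] = '\0';
memcpy(prod, mpc->mpc_productid, 12);
prod[12] = '\0';
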
*/ +#ifdef CONFIG_X86_NUMAQ + mpc_record = 0; +#endif while (count < mpc->mpc_length) { switch (*mpt) { case MP_PROCESSOR: { struct mpc_config_processor *m = (struct mpc_config_processor *)mpt; + /* ACPI may have already provided this data */ if (!acpi_lapic) MP_processor_info(m); mpt += sizeof(*m); @@ -280,7 +289,15 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) count += sizeof(*m); break; } + default: + { + count = mpc->mpc_length; + break; + } } +#ifdef CONFIG_X86_NUMAQ + ++mpc_record; +#endif } setup_apic_routing(); if (!num_processors) -- cgit v1.2.1 From 62441bf1e0d5153dfb0cf8497df16deacff90789 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:34 +0400 Subject: x86: unify construct_default_ioirq_mptable Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 15 +++++++-------- arch/x86/kernel/mpparse_64.c | 18 ++++++++++++++---- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index c185065c3ebc..e92c29e5fd4f 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -436,13 +436,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) * If it does, we assume it's valid. */ if (mpc_default_type == 5) { - printk(KERN_INFO - "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); + printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " + "falling back to ELCR\n"); - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) - || ELCR_trigger(13)) - printk(KERN_WARNING - "ELCR contains invalid data... not using ELCR\n"); + if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || + ELCR_trigger(13)) + printk(KERN_ERR "ELCR contains invalid data... " + "not using ELCR\n"); else { printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); @@ -523,8 +523,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) bus.mpc_busid = 0; switch (mpc_default_type) { default: - printk("???\n"); - printk(KERN_ERR "Unknown standard configuration %d\n", + printk(KERN_ERR "???\nUnknown standard configuration %d\n", mpc_default_type); /* fall through */ case 1: diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index d188848a893e..11b74c9b8e01 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -401,10 +401,12 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) * 2 CPUs, numbered 0 & 1. */ processor.mpc_type = MP_PROCESSOR; - processor.mpc_apicver = 0; + /* Either an integrated APIC or a discrete 82489DX. */ + processor.mpc_apicver = mpc_default_type > 4 ? 
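
/*
 * The mpc_cpufeature value assigned just below packs the boot CPU's
 * signature in the classic CPUID layout (ignoring the extended family
 * and model fields): family in bits 8-11, model in bits 4-7, stepping
 * in bits 0-3.  For example, family 6 / model 15 / stepping 2 gives:
 */
unsigned int sig = (6 << 8) | (15 << 4) | 2;	/* == 0x6f2 */
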
0x10 : 0x01; processor.mpc_cpuflag = CPU_ENABLED; - processor.mpc_cpufeature = 0; - processor.mpc_featureflag = 0; + processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; processor.mpc_reserved[0] = 0; processor.mpc_reserved[1] = 0; for (i = 0; i < 2; i++) { @@ -423,6 +425,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) case 5: memcpy(bus.mpc_bustype, "ISA ", 6); break; + case 2: + case 6: + case 3: + memcpy(bus.mpc_bustype, "EISA ", 6); + break; + case 4: + case 7: + memcpy(bus.mpc_bustype, "MCA ", 6); } MP_bus_info(&bus); if (mpc_default_type > 4) { @@ -433,7 +443,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) ioapic.mpc_type = MP_IOAPIC; ioapic.mpc_apicid = 2; - ioapic.mpc_apicver = 0; + ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; ioapic.mpc_flags = MPC_APIC_USABLE; ioapic.mpc_apicaddr = 0xFEC00000; MP_ioapic_info(&ioapic); -- cgit v1.2.1 From 4421b1c8b9f5da24f8c737ede2c05d399dea2015 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:40 +0400 Subject: x86: unify get_smp_config Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 20 +++++++++++--------- arch/x86/kernel/mpparse_64.c | 10 +++++++++- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index e92c29e5fd4f..bc2000ee0391 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -588,15 +588,16 @@ static void __init __get_smp_config(unsigned early) * processors, where MPS only supports physical. */ if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO - "Using ACPI (MADT) for SMP configuration information\n"); + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " + "information\n"); return; } else if (acpi_lapic) - printk(KERN_INFO - "Using ACPI for processor (LAPIC) configuration information\n"); + printk(KERN_INFO "Using ACPI for processor (LAPIC) " + "configuration information\n"); printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); +#ifdef CONFIG_X86_32 if (mpf->mpf_feature2 & (1 << 7)) { printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); pic_mode = 1; @@ -604,7 +605,7 @@ static void __init __get_smp_config(unsigned early) printk(KERN_INFO " Virtual Wire compatibility mode.\n"); pic_mode = 0; } - +#endif /* * Now see if we need to read further. */ @@ -631,8 +632,8 @@ static void __init __get_smp_config(unsigned early) smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR - "... disabling SMP support. (tell your hw vendor)\n"); + printk(KERN_ERR "... disabling SMP support. " + "(tell your hw vendor)\n"); return; } @@ -647,8 +648,9 @@ static void __init __get_smp_config(unsigned early) if (!mp_irq_entries) { struct mpc_config_bus bus; - printk(KERN_ERR - "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); + printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " + "using default mptable. 
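
/*
 * The mpc_default_type switch earlier in this patch reduces to the
 * following mapping; default_bus_name() is a hypothetical summary
 * helper, not code from the patch:
 */
static const char *default_bus_name(int type)
{
	switch (type) {
	case 1: case 5:			/* ISA-only and ISA + PCI */
		return "ISA";
	case 2: case 3: case 6:		/* EISA variants */
		return "EISA";
	case 4: case 7:			/* MCA variants */
		return "MCA";
	default:
		return "unknown";
	}
}
/* Types above 4 additionally get a second, PCI, bus (busid 1). */
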
" + "(tell your hw vendor)\n"); bus.mpc_type = MP_BUS; bus.mpc_busid = 0; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 11b74c9b8e01..8c7af5b7ddd4 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -490,7 +490,15 @@ static void __init __get_smp_config(unsigned early) printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); - +#ifdef CONFIG_X86_32 + if (mpf->mpf_feature2 & (1 << 7)) { + printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); + pic_mode = 1; + } else { + printk(KERN_INFO " Virtual Wire compatibility mode.\n"); + pic_mode = 0; + } +#endif /* * Now see if we need to read further. */ -- cgit v1.2.1 From 92fd4b7abdb2b5b85d73ca0adbb6ad3f8b79f805 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:46 +0400 Subject: x86: unify smp_scan_config Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 21 ++++++++++++++++----- arch/x86/kernel/mpparse_64.c | 26 +++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index bc2000ee0391..7feafa5040d8 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -683,12 +683,13 @@ void __init get_smp_config(void) static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned reserve) { - unsigned long *bp = phys_to_virt(base); + extern void __bad_mpf_size(void); + unsigned int *bp = phys_to_virt(base); struct intel_mp_floating *mpf; - printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp, length); + Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); if (sizeof(*mpf) != 16) - printk("Error: MPF size\n"); + __bad_mpf_size(); while (length > 0) { mpf = (struct intel_mp_floating *)bp; @@ -699,6 +700,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, || (mpf->mpf_specification == 4))) { smp_found_config = 1; + mpf_found = mpf; +#ifdef CONFIG_X86_32 printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", mpf, virt_to_phys(mpf)); reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, @@ -721,8 +724,16 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, BOOTMEM_DEFAULT); } - mpf_found = mpf; - return 1; +#else + if (!reserve) + return 1; + + reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); + if (mpf->mpf_physptr) + reserve_bootmem_generic(mpf->mpf_physptr, + PAGE_SIZE); +#endif + return 1; } bp += 4; length -= 16; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 8c7af5b7ddd4..9a9610089910 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -593,7 +593,30 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, smp_found_config = 1; mpf_found = mpf; +#ifdef CONFIG_X86_32 + printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", + mpf, virt_to_phys(mpf)); + reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, + BOOTMEM_DEFAULT); + if (mpf->mpf_physptr) { + /* + * We cannot access to MPC table to compute + * table size yet, as only few megabytes from + * the bottom is mapped now. + * PC-9800's MPC table places on the very last + * of physical memory; so that simply reserving + * PAGE_SIZE from mpg->mpf_physptr yields BUG() + * in reserve_bootmem. 
+ */ + unsigned long size = PAGE_SIZE; + unsigned long end = max_low_pfn * PAGE_SIZE; + if (mpf->mpf_physptr + size > end) + size = end - mpf->mpf_physptr; + reserve_bootmem(mpf->mpf_physptr, size, + BOOTMEM_DEFAULT); + } +#else if (!reserve) return 1; @@ -601,7 +624,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, if (mpf->mpf_physptr) reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE); - return 1; +#endif + return 1; } bp += 4; length -= 16; -- cgit v1.2.1 From 22cbb4bd12a86b80125accb77515482894ee670f Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:42:53 +0400 Subject: x86: unify uniq_io_apic_id Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 14 +++++++++++++- arch/x86/kernel/mpparse_64.c | 8 ++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 7feafa5040d8..a50a31331f97 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -814,17 +814,29 @@ static int mp_find_ioapic(int gsi) } printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); - return -1; } static u8 uniq_ioapic_id(u8 id) { +#ifdef CONFIG_X86_32 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) return io_apic_get_unique_id(nr_ioapics, id); else return id; +#else + int i; + DECLARE_BITMAP(used, 256); + bitmap_zero(used, 256); + for (i = 0; i < nr_ioapics; i++) { + struct mpc_config_ioapic *ia = &mp_ioapics[i]; + __set_bit(ia->mpc_apicid, used); + } + if (!test_bit(id, used)) + return id; + return find_first_zero_bit(used, 256); +#endif } void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 9a9610089910..de64a89434c6 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -707,6 +707,13 @@ static int mp_find_ioapic(int gsi) static u8 uniq_ioapic_id(u8 id) { +#ifdef CONFIG_X86_32 + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else + return id; +#else int i; DECLARE_BITMAP(used, 256); bitmap_zero(used, 256); @@ -717,6 +724,7 @@ static u8 uniq_ioapic_id(u8 id) if (!test_bit(id, used)) return id; return find_first_zero_bit(used, 256); +#endif } void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) -- cgit v1.2.1 From 57b733e902f179dc453609db6334f8e9801cbb7a Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:43:00 +0400 Subject: x86: unify mp_register_ioapic Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 10 ++++++---- arch/x86/kernel/mpparse_64.c | 16 +++++++++------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index a50a31331f97..1ad8a5d4e3ee 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -854,8 +854,11 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); +#ifdef CONFIG_X86_32 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); - +#else + mp_ioapics[idx].mpc_apicver = 0; +#endif /* * Build basic GSI lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 
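
/*
 * The 64-bit uniq_ioapic_id() strategy above, in miniature: record
 * every APIC ID already claimed in a 256-bit bitmap, keep the
 * requested id if it is free, otherwise hand out the lowest unused
 * one (pick_free_apic_id() is a sketch, not part of the patch):
 */
static u8 pick_free_apic_id(u8 id)
{
	DECLARE_BITMAP(used, 256);
	int i;

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++)
		__set_bit(mp_ioapics[i].mpc_apicid, used);

	return test_bit(id, used) ? find_first_zero_bit(used, 256) : id;
}
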
@@ -865,10 +868,9 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) mp_ioapic_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx); - printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " + printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicver, - mp_ioapics[idx].mpc_apicaddr, + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); nr_ioapics++; diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index de64a89434c6..5f1f7fdcd627 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -742,11 +742,14 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); +#ifdef CONFIG_X86_32 + mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); +#else mp_ioapics[idx].mpc_apicver = 0; - - /* - * Build basic IRQ lookup table to facilitate gsi->io_apic lookups - * and to prevent reprogramming of IOAPIC pins (PCI IRQs). +#endif + /* + * Build basic GSI lookup table to facilitate gsi->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). */ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; mp_ioapic_routing[idx].gsi_base = gsi_base; @@ -755,9 +758,8 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, " "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_base, - mp_ioapic_routing[idx].gsi_end); + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); nr_ioapics++; } -- cgit v1.2.1 From c769bfee5731f2614983bd7a9079eb90514e4b9f Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:43:06 +0400 Subject: x86: unify mp_config_acpi_legacy_irqs Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_64.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 5f1f7fdcd627..8ad365136a6b 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -804,19 +804,31 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) panic("Max # of irq sources exceeded!\n"); } +int es7000_plat; + void __init mp_config_acpi_legacy_irqs(void) { struct mpc_config_intsrc intsrc; int i = 0; int ioapic = -1; - /* +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) + /* * Fabricate the legacy ISA bus (bus #31). */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; +#endif set_bit(MP_ISA_BUS, mp_bus_not_pci); + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); - /* - * Locate the IOAPIC that manages the ISA IRQs (0-15). + /* + * Older generations of ES7000 have no legacy identity mappings + */ + if (es7000_plat == 1) + return; + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). */ ioapic = mp_find_ioapic(0); if (ioapic < 0) @@ -827,7 +839,7 @@ void __init mp_config_acpi_legacy_irqs(void) intsrc.mpc_srcbus = MP_ISA_BUS; intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; - /* + /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. 
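
/*
 * With gsi_base/gsi_end recorded per IOAPIC as above, resolving a GSI
 * to an (ioapic, pin) pair is a linear scan plus a subtraction -- a
 * condensed sketch of mp_find_ioapic() and its callers:
 */
int ioapic = -1, pin = -1, i;

for (i = 0; i < nr_ioapics; i++) {
	if (gsi >= mp_ioapic_routing[i].gsi_base &&
	    gsi <= mp_ioapic_routing[i].gsi_end) {
		ioapic = i;
		pin = gsi - mp_ioapic_routing[i].gsi_base;
		break;
	}
}
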
*/ -- cgit v1.2.1 From cfa08d6cc3421bb08ebaa34467107e58999a7c28 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:43:12 +0400 Subject: x86: unify mp_register_gsi Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse_32.c | 22 ++++++++++---- arch/x86/kernel/mpparse_64.c | 70 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 82 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c index 1ad8a5d4e3ee..6376791cffe5 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse_32.c @@ -994,14 +994,15 @@ void __init mp_config_acpi_legacy_irqs(void) } } -#define MAX_GSI_NUM 4096 -#define IRQ_COMPRESSION_START 64 - int mp_register_gsi(u32 gsi, int triggering, int polarity) { int ioapic = -1; int ioapic_pin = 0; int idx, bit = 0; +#ifdef CONFIG_X86_32 +#define MAX_GSI_NUM 4096 +#define IRQ_COMPRESSION_START 64 + static int pci_irq = IRQ_COMPRESSION_START; /* * Mapping between Global System Interrupts, which @@ -1009,6 +1010,11 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) * assigned to actual devices. */ static int gsi_to_irq[MAX_GSI_NUM]; +#else + + if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) + return gsi; +#endif /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) @@ -1022,8 +1028,10 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; +#ifdef CONFIG_X86_32 if (ioapic_renumber_irq) gsi = ioapic_renumber_irq(ioapic, gsi); +#endif /* * Avoid pin reprogramming. PRTs typically include entries @@ -1041,11 +1049,15 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", mp_ioapic_routing[ioapic].apic_id, ioapic_pin); +#ifdef CONFIG_X86_32 return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); +#else + return gsi; +#endif } mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); - +#ifdef CONFIG_X86_32 /* * For GSI >= 64, use IRQ compression */ @@ -1079,7 +1091,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) return gsi; } } - +#endif io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, polarity == ACPI_ACTIVE_HIGH ? 0 : 1); diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c index 8ad365136a6b..7d742577d68a 100644 --- a/arch/x86/kernel/mpparse_64.c +++ b/arch/x86/kernel/mpparse_64.c @@ -685,6 +685,8 @@ void __init find_smp_config(void) #ifdef CONFIG_ACPI +#ifdef CONFIG_X86_IO_APIC + #define MP_ISA_BUS 0 #define MP_MAX_IOAPIC_PIN 127 @@ -770,7 +772,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) int ioapic = -1; int pin = -1; - /* + /* * Convert 'gsi' to 'ioapic.pin'. */ ioapic = mp_find_ioapic(gsi); @@ -780,7 +782,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) /* * TBD: This check is for faulty timer entries, where the override - * erroneously sets the trigger to level, resulting in a HUGE + * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! 
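
/*
 * The pin_programmed bookkeeping mp_register_gsi() relies on: four
 * 32-bit words per IOAPIC cover pins 0-127 (MP_MAX_IOAPIC_PIN), one
 * bit per pin, so testing and marking a pin reduces to:
 */
int bit = ioapic_pin % 32;
int idx = ioapic_pin / 32;	/* 0..3 for pins 0..127 */

if (mp_ioapic_routing[ioapic].pin_programmed[idx] & (1 << bit))
	return gsi;		/* this pin was already routed once */
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
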
*/ if ((bus_irq == 0) && (trigger == 3)) @@ -886,9 +888,22 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) int ioapic = -1; int ioapic_pin = 0; int idx, bit = 0; +#ifdef CONFIG_X86_32 +#define MAX_GSI_NUM 4096 +#define IRQ_COMPRESSION_START 64 + + static int pci_irq = IRQ_COMPRESSION_START; + /* + * Mapping between Global System Interrupts, which + * represent all possible interrupts, and IRQs + * assigned to actual devices. + */ + static int gsi_to_irq[MAX_GSI_NUM]; +#else if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) return gsi; +#endif /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) @@ -902,8 +917,13 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - /* - * Avoid pin reprogramming. PRTs typically include entries +#ifdef CONFIG_X86_32 + if (ioapic_renumber_irq) + gsi = ioapic_renumber_irq(ioapic, gsi); +#endif + + /* + * Avoid pin reprogramming. PRTs typically include entries * with redundant pin->gsi mappings (but unique PCI devices); * we only program the IOAPIC on the first. */ @@ -918,14 +938,54 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", mp_ioapic_routing[ioapic].apic_id, ioapic_pin); +#ifdef CONFIG_X86_32 + return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); +#else return gsi; +#endif } mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); - +#ifdef CONFIG_X86_32 + /* + * For GSI >= 64, use IRQ compression + */ + if ((gsi >= IRQ_COMPRESSION_START) + && (triggering == ACPI_LEVEL_SENSITIVE)) { + /* + * For PCI devices assign IRQs in order, avoiding gaps + * due to unused I/O APIC pins. + */ + int irq = gsi; + if (gsi < MAX_GSI_NUM) { + /* + * Retain the VIA chipset work-around (gsi > 15), but + * avoid a problem where the 8254 timer (IRQ0) is setup + * via an override (so it's not on pin 0 of the ioapic), + * and at the same time, the pin 0 interrupt is a PCI + * type. The gsi > 15 test could cause these two pins + * to be shared as IRQ0, and they are not shareable. + * So test for this condition, and if necessary, avoid + * the pin collision. + */ + gsi = pci_irq++; + /* + * Don't assign IRQ used by ACPI SCI + */ + if (gsi == acpi_gbl_FADT.sci_interrupt) + gsi = pci_irq++; + gsi_to_irq[irq] = gsi; + } else { + printk(KERN_ERR "GSI %u is too high\n", gsi); + return gsi; + } + } +#endif io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, polarity == ACPI_ACTIVE_HIGH ? 
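
/*
 * The 32-bit IRQ compression above, stripped to its core (the
 * gsi < MAX_GSI_NUM bounds check is omitted here): level-triggered
 * GSIs at or above 64 are renumbered densely from
 * IRQ_COMPRESSION_START upward, skipping the ACPI SCI, with the
 * original GSI kept as the index of a back-mapping:
 */
int irq = gsi;

gsi = pci_irq++;
if (gsi == acpi_gbl_FADT.sci_interrupt)
	gsi = pci_irq++;	/* never hand out the SCI's IRQ */
gsi_to_irq[irq] = gsi;
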
0 : 1); return gsi; } + +#endif /* CONFIG_X86_IO_APIC */ #endif /* CONFIG_ACPI */ -- cgit v1.2.1 From 85bdddec5eaeb2464bf1cad6a17225416e65a8d6 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 4 Apr 2008 23:43:18 +0400 Subject: x86: merge mpparse_{32,64}.c Signed-off-by: Alexey Starikovskiy Signed-off-by: Ingo Molnar --- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/mpparse.c | 1102 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/mpparse_32.c | 1102 ------------------------------------------ arch/x86/kernel/mpparse_64.c | 991 ------------------------------------- 4 files changed, 1103 insertions(+), 2094 deletions(-) create mode 100644 arch/x86/kernel/mpparse.c delete mode 100644 arch/x86/kernel/mpparse_32.c delete mode 100644 arch/x86/kernel/mpparse_64.c diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0bf2fb55aa74..fdd8395e0ed3 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -51,7 +51,7 @@ obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o obj-$(CONFIG_X86_32_SMP) += smpcommon.o obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o -obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o +obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c new file mode 100644 index 000000000000..6e5e4547981c --- /dev/null +++ b/arch/x86/kernel/mpparse.c @@ -0,0 +1,1102 @@ +/* + * Intel Multiprocessor Specification 1.1 and 1.4 + * compliant MP-table parsing routines. + * + * (c) 1995 Alan Cox, Building #3 + * (c) 1998, 1999, 2000 Ingo Molnar + * (c) 2008 Alexey Starikovskiy + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef CONFIG_X86_32 +#include +#include +#endif + +/* Have we found an MP table */ +int smp_found_config; + +/* + * Various Linux-internal data structures created from the + * MP-table. + */ +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) +int mp_bus_id_to_type[MAX_MP_BUSSES]; +#endif + +DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); +int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; + +static int mp_current_pci_id; + +int pic_mode; + +/* + * Intel MP BIOS table parsing routines: + */ + +/* + * Checksum an MP configuration block. + */ + +static int __init mpf_checksum(unsigned char *mp, int len) +{ + int sum = 0; + + while (len--) + sum += *mp++; + + return sum & 0xFF; +} + +#ifdef CONFIG_X86_NUMAQ +/* + * Have to match translation table entries to main table entries by counter + * hence the mpc_record variable .... can't see a less disgusting way of + * doing this .... 
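
/*
 * mpf_checksum() above accepts a block when all of its bytes sum to
 * zero mod 256.  A hypothetical table generator (not part of this
 * file) would therefore pick the checksum byte as the negated sum of
 * everything else, with the checksum slot zeroed while summing:
 */
static unsigned char mpc_make_checksum(unsigned char *mp, int len)
{
	unsigned char sum = 0;

	while (len--)
		sum += *mp++;
	return (unsigned char)-sum;	/* makes mpf_checksum() return 0 */
}
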
+ */ + +static int mpc_record; +static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] + __cpuinitdata; +#endif + +static void __cpuinit MP_processor_info(struct mpc_config_processor *m) +{ + int apicid; + char *bootup_cpu = ""; + + if (!(m->mpc_cpuflag & CPU_ENABLED)) { + disabled_cpus++; + return; + } +#ifdef CONFIG_X86_NUMAQ + apicid = mpc_apic_id(m, translation_table[mpc_record]); +#else + apicid = m->mpc_apicid; +#endif + if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + bootup_cpu = " (Bootup-CPU)"; + boot_cpu_physical_apicid = m->mpc_apicid; + } + + printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); + generic_processor_info(apicid, m->mpc_apicver); +} + +static void __init MP_bus_info(struct mpc_config_bus *m) +{ + char str[7]; + + memcpy(str, m->mpc_bustype, 6); + str[6] = 0; + +#ifdef CONFIG_X86_NUMAQ + mpc_oem_bus_info(m, str, translation_table[mpc_record]); +#else + Dprintk("Bus #%d is %s\n", m->mpc_busid, str); +#endif + +#if MAX_MP_BUSSES < 256 + if (m->mpc_busid >= MAX_MP_BUSSES) { + printk(KERN_WARNING "MP table busid value (%d) for bustype %s " + " is too large, max. supported is %d\n", + m->mpc_busid, str, MAX_MP_BUSSES - 1); + return; + } +#endif + + if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { + set_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; +#endif + } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { +#ifdef CONFIG_X86_NUMAQ + mpc_oem_pci_bus(m, translation_table[mpc_record]); +#endif + clear_bit(m->mpc_busid, mp_bus_not_pci); + mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; + mp_current_pci_id++; +#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; + } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; + } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { + mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; +#endif + } else + printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); +} + +#ifdef CONFIG_X86_IO_APIC + +static int bad_ioapic(unsigned long address) +{ + if (nr_ioapics >= MAX_IO_APICS) { + printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " + "(found %d)\n", MAX_IO_APICS, nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS!\n"); + } + if (!address) { + printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" + " found in table, skipping!\n"); + return 1; + } + return 0; +} + +static void __init MP_ioapic_info(struct mpc_config_ioapic *m) +{ + if (!(m->mpc_flags & MPC_APIC_USABLE)) + return; + + printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", + m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); + + if (bad_ioapic(m->mpc_apicaddr)) + return; + + mp_ioapics[nr_ioapics] = *m; + nr_ioapics++; +} + +static void __init MP_intsrc_info(struct mpc_config_intsrc *m) +{ + mp_irqs[mp_irq_entries] = *m; + Dprintk("Int: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC INT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, + m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!!\n"); +} + +#endif + +static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) +{ + Dprintk("Lint: type %d, pol %d, trig %d, bus %d," + " IRQ %02x, APIC ID %x, APIC LINT %02x\n", + m->mpc_irqtype, m->mpc_irqflag & 3, + (m->mpc_irqflag >> 2) & 3, 
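
/*
 * Decoding of mpc_irqflag, as the Dprintk()s here show: bits 0-1 are
 * polarity, bits 2-3 are trigger mode.  Per the MP spec, 0 means
 * "conforms to bus", 1 is active-high/edge, 3 is active-low/level
 * (which is why the value 13 == (3 << 2) | 1 used for the ELCR
 * fallback later means level-triggered, active high):
 */
unsigned int polarity = m->mpc_irqflag & 3;
unsigned int trigger  = (m->mpc_irqflag >> 2) & 3;
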
m->mpc_srcbusid, + m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); +} + +#ifdef CONFIG_X86_NUMAQ +static void __init MP_translation_info(struct mpc_config_translation *m) +{ + printk(KERN_INFO + "Translation: record %d, type %d, quad %d, global %d, local %d\n", + mpc_record, m->trans_type, m->trans_quad, m->trans_global, + m->trans_local); + + if (mpc_record >= MAX_MPC_ENTRY) + printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); + else + translation_table[mpc_record] = m; /* stash this for later */ + if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) + node_set_online(m->trans_quad); +} + +/* + * Read/parse the MPC oem tables + */ + +static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, + unsigned short oemsize) +{ + int count = sizeof(*oemtable); /* the header size */ + unsigned char *oemptr = ((unsigned char *)oemtable) + count; + + mpc_record = 0; + printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", + oemtable); + if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { + printk(KERN_WARNING + "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", + oemtable->oem_signature[0], oemtable->oem_signature[1], + oemtable->oem_signature[2], oemtable->oem_signature[3]); + return; + } + if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { + printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); + return; + } + while (count < oemtable->oem_length) { + switch (*oemptr) { + case MP_TRANSLATION: + { + struct mpc_config_translation *m = + (struct mpc_config_translation *)oemptr; + MP_translation_info(m); + oemptr += sizeof(*m); + count += sizeof(*m); + ++mpc_record; + break; + } + default: + { + printk(KERN_WARNING + "Unrecognised OEM table entry type! - %d\n", + (int)*oemptr); + return; + } + } + } +} + +static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, + char *productid) +{ + if (strncmp(oem, "IBM NUMA", 8)) + printk("Warning! May not be a NUMA-Q system!\n"); + if (mpc->mpc_oemptr) + smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr, + mpc->mpc_oemsize); +} +#endif /* CONFIG_X86_NUMAQ */ + +/* + * Read/parse the MPC + */ + +static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) +{ + char str[16]; + char oem[10]; + int count = sizeof(*mpc); + unsigned char *mpt = ((unsigned char *)mpc) + count; + + if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { + printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", + mpc->mpc_signature[0], mpc->mpc_signature[1], + mpc->mpc_signature[2], mpc->mpc_signature[3]); + return 0; + } + if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { + printk(KERN_ERR "MPTABLE: checksum error!\n"); + return 0; + } + if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { + printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", + mpc->mpc_spec); + return 0; + } + if (!mpc->mpc_lapic) { + printk(KERN_ERR "MPTABLE: null local APIC address!\n"); + return 0; + } + memcpy(oem, mpc->mpc_oem, 8); + oem[8] = 0; + printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); + + memcpy(str, mpc->mpc_productid, 12); + str[12] = 0; + printk("Product ID: %s ", str); + +#ifdef CONFIG_X86_32 + mps_oem_check(mpc, oem, str); +#endif + printk(KERN_INFO "MPTABLE: Product ID: %s ", str); + + printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); + + /* save the local APIC address, it might be non-default */ + if (!acpi_lapic) + mp_lapic_addr = mpc->mpc_lapic; + + if (early) + return 1; + + /* + * Now process the configuration blocks. 
+ */ +#ifdef CONFIG_X86_NUMAQ + mpc_record = 0; +#endif + while (count < mpc->mpc_length) { + switch (*mpt) { + case MP_PROCESSOR: + { + struct mpc_config_processor *m = + (struct mpc_config_processor *)mpt; + /* ACPI may have already provided this data */ + if (!acpi_lapic) + MP_processor_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + case MP_BUS: + { + struct mpc_config_bus *m = + (struct mpc_config_bus *)mpt; + MP_bus_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + case MP_IOAPIC: + { +#ifdef CONFIG_X86_IO_APIC + struct mpc_config_ioapic *m = + (struct mpc_config_ioapic *)mpt; + MP_ioapic_info(m); +#endif + mpt += sizeof(struct mpc_config_ioapic); + count += sizeof(struct mpc_config_ioapic); + break; + } + case MP_INTSRC: + { +#ifdef CONFIG_X86_IO_APIC + struct mpc_config_intsrc *m = + (struct mpc_config_intsrc *)mpt; + + MP_intsrc_info(m); +#endif + mpt += sizeof(struct mpc_config_intsrc); + count += sizeof(struct mpc_config_intsrc); + break; + } + case MP_LINTSRC: + { + struct mpc_config_lintsrc *m = + (struct mpc_config_lintsrc *)mpt; + MP_lintsrc_info(m); + mpt += sizeof(*m); + count += sizeof(*m); + break; + } + default: + { + count = mpc->mpc_length; + break; + } + } +#ifdef CONFIG_X86_NUMAQ + ++mpc_record; +#endif + } + setup_apic_routing(); + if (!num_processors) + printk(KERN_ERR "MPTABLE: no processors registered!\n"); + return num_processors; +} + +#ifdef CONFIG_X86_IO_APIC + +static int __init ELCR_trigger(unsigned int irq) +{ + unsigned int port; + + port = 0x4d0 + (irq >> 3); + return (inb(port) >> (irq & 7)) & 1; +} + +static void __init construct_default_ioirq_mptable(int mpc_default_type) +{ + struct mpc_config_intsrc intsrc; + int i; + int ELCR_fallback = 0; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* conforming */ + intsrc.mpc_srcbus = 0; + intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; + + intsrc.mpc_irqtype = mp_INT; + + /* + * If true, we have an ISA/PCI system with no IRQ entries + * in the MP table. To prevent the PCI interrupts from being set up + * incorrectly, we try to use the ELCR. The sanity check to see if + * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can + * never be level sensitive, so we simply see if the ELCR agrees. + * If it does, we assume it's valid. + */ + if (mpc_default_type == 5) { + printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " + "falling back to ELCR\n"); + + if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || + ELCR_trigger(13)) + printk(KERN_ERR "ELCR contains invalid data... " + "not using ELCR\n"); + else { + printk(KERN_INFO + "Using ELCR to identify PCI interrupts\n"); + ELCR_fallback = 1; + } + } + + for (i = 0; i < 16; i++) { + switch (mpc_default_type) { + case 2: + if (i == 0 || i == 13) + continue; /* IRQ0 & IRQ13 not connected */ + /* fall through */ + default: + if (i == 2) + continue; /* IRQ2 is never connected */ + } + + if (ELCR_fallback) { + /* + * If the ELCR indicates a level-sensitive interrupt, we + * copy that information over to the MP table in the + * irqflag field (level sensitive, active high polarity). + */ + if (ELCR_trigger(i)) + intsrc.mpc_irqflag = 13; + else + intsrc.mpc_irqflag = 0; + } + + intsrc.mpc_srcbusirq = i; + intsrc.mpc_dstirq = i ? 
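
/*
 * ELCR probing as ELCR_trigger() above implements it: two I/O ports
 * (0x4d0 for IRQs 0-7, 0x4d1 for IRQs 8-15) hold one trigger bit per
 * IRQ, with a set bit meaning level-triggered.  A condensed sketch of
 * the same read:
 */
static int elcr_level(unsigned int irq)
{
	return (inb(0x4d0 + (irq >> 3)) >> (irq & 7)) & 1;
}
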
i : 2; /* IRQ0 to INTIN2 */ + MP_intsrc_info(&intsrc); + } + + intsrc.mpc_irqtype = mp_ExtINT; + intsrc.mpc_srcbusirq = 0; + intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ + MP_intsrc_info(&intsrc); +} + +#endif + +static inline void __init construct_default_ISA_mptable(int mpc_default_type) +{ + struct mpc_config_processor processor; + struct mpc_config_bus bus; +#ifdef CONFIG_X86_IO_APIC + struct mpc_config_ioapic ioapic; +#endif + struct mpc_config_lintsrc lintsrc; + int linttypes[2] = { mp_ExtINT, mp_NMI }; + int i; + + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + + /* + * 2 CPUs, numbered 0 & 1. + */ + processor.mpc_type = MP_PROCESSOR; + /* Either an integrated APIC or a discrete 82489DX. */ + processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; + processor.mpc_cpuflag = CPU_ENABLED; + processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; + processor.mpc_reserved[0] = 0; + processor.mpc_reserved[1] = 0; + for (i = 0; i < 2; i++) { + processor.mpc_apicid = i; + MP_processor_info(&processor); + } + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + switch (mpc_default_type) { + default: + printk(KERN_ERR "???\nUnknown standard configuration %d\n", + mpc_default_type); + /* fall through */ + case 1: + case 5: + memcpy(bus.mpc_bustype, "ISA ", 6); + break; + case 2: + case 6: + case 3: + memcpy(bus.mpc_bustype, "EISA ", 6); + break; + case 4: + case 7: + memcpy(bus.mpc_bustype, "MCA ", 6); + } + MP_bus_info(&bus); + if (mpc_default_type > 4) { + bus.mpc_busid = 1; + memcpy(bus.mpc_bustype, "PCI ", 6); + MP_bus_info(&bus); + } + +#ifdef CONFIG_X86_IO_APIC + ioapic.mpc_type = MP_IOAPIC; + ioapic.mpc_apicid = 2; + ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; + ioapic.mpc_flags = MPC_APIC_USABLE; + ioapic.mpc_apicaddr = 0xFEC00000; + MP_ioapic_info(&ioapic); + + /* + * We set up most of the low 16 IO-APIC pins according to MPS rules. + */ + construct_default_ioirq_mptable(mpc_default_type); +#endif + lintsrc.mpc_type = MP_LINTSRC; + lintsrc.mpc_irqflag = 0; /* conforming */ + lintsrc.mpc_srcbusid = 0; + lintsrc.mpc_srcbusirq = 0; + lintsrc.mpc_destapic = MP_APIC_ALL; + for (i = 0; i < 2; i++) { + lintsrc.mpc_irqtype = linttypes[i]; + lintsrc.mpc_destapiclint = i; + MP_lintsrc_info(&lintsrc); + } +} + +static struct intel_mp_floating *mpf_found; + +/* + * Scan the memory blocks for an SMP configuration block. + */ +static void __init __get_smp_config(unsigned early) +{ + struct intel_mp_floating *mpf = mpf_found; + + if (acpi_lapic && early) + return; + /* + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. + */ + if (acpi_lapic && acpi_ioapic) { + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " + "information\n"); + return; + } else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) " + "configuration information\n"); + + printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", + mpf->mpf_specification); +#ifdef CONFIG_X86_32 + if (mpf->mpf_feature2 & (1 << 7)) { + printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); + pic_mode = 1; + } else { + printk(KERN_INFO " Virtual Wire compatibility mode.\n"); + pic_mode = 0; + } +#endif + /* + * Now see if we need to read further. 
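
/*
 * Feature byte 2 of the floating pointer, as tested above: bit 7 set
 * means the board has an IMCR and powers up in PIC compatibility
 * mode; clear means virtual-wire mode.  The whole if/else reduces to:
 */
pic_mode = !!(mpf->mpf_feature2 & (1 << 7));
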
+ */ + if (mpf->mpf_feature1 != 0) { + if (early) { + /* + * local APIC has default address + */ + mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + return; + } + + printk(KERN_INFO "Default MP configuration #%d\n", + mpf->mpf_feature1); + construct_default_ISA_mptable(mpf->mpf_feature1); + + } else if (mpf->mpf_physptr) { + + /* + * Read the physical hardware table. Anything here will + * override the defaults. + */ + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { + smp_found_config = 0; + printk(KERN_ERR + "BIOS bug, MP table errors detected!...\n"); + printk(KERN_ERR "... disabling SMP support. " + "(tell your hw vendor)\n"); + return; + } + + if (early) + return; +#ifdef CONFIG_X86_IO_APIC + /* + * If there are no explicit MP IRQ entries, then we are + * broken. We set up most of the low 16 IO-APIC pins to + * ISA defaults and hope it will work. + */ + if (!mp_irq_entries) { + struct mpc_config_bus bus; + + printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " + "using default mptable. " + "(tell your hw vendor)\n"); + + bus.mpc_type = MP_BUS; + bus.mpc_busid = 0; + memcpy(bus.mpc_bustype, "ISA ", 6); + MP_bus_info(&bus); + + construct_default_ioirq_mptable(0); + } +#endif + } else + BUG(); + + if (!early) + printk(KERN_INFO "Processors: %d\n", num_processors); + /* + * Only use the first configuration found. + */ +} + +void __init early_get_smp_config(void) +{ + __get_smp_config(1); +} + +void __init get_smp_config(void) +{ + __get_smp_config(0); +} + +static int __init smp_scan_config(unsigned long base, unsigned long length, + unsigned reserve) +{ + extern void __bad_mpf_size(void); + unsigned int *bp = phys_to_virt(base); + struct intel_mp_floating *mpf; + + Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); + if (sizeof(*mpf) != 16) + __bad_mpf_size(); + + while (length > 0) { + mpf = (struct intel_mp_floating *)bp; + if ((*bp == SMP_MAGIC_IDENT) && + (mpf->mpf_length == 1) && + !mpf_checksum((unsigned char *)bp, 16) && + ((mpf->mpf_specification == 1) + || (mpf->mpf_specification == 4))) { + + smp_found_config = 1; + mpf_found = mpf; +#ifdef CONFIG_X86_32 + printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", + mpf, virt_to_phys(mpf)); + reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, + BOOTMEM_DEFAULT); + if (mpf->mpf_physptr) { + /* + * We cannot access to MPC table to compute + * table size yet, as only few megabytes from + * the bottom is mapped now. + * PC-9800's MPC table places on the very last + * of physical memory; so that simply reserving + * PAGE_SIZE from mpg->mpf_physptr yields BUG() + * in reserve_bootmem. + */ + unsigned long size = PAGE_SIZE; + unsigned long end = max_low_pfn * PAGE_SIZE; + if (mpf->mpf_physptr + size > end) + size = end - mpf->mpf_physptr; + reserve_bootmem(mpf->mpf_physptr, size, + BOOTMEM_DEFAULT); + } + +#else + if (!reserve) + return 1; + + reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); + if (mpf->mpf_physptr) + reserve_bootmem_generic(mpf->mpf_physptr, + PAGE_SIZE); +#endif + return 1; + } + bp += 4; + length -= 16; + } + return 0; +} + +static void __init __find_smp_config(unsigned reserve) +{ + unsigned int address; + + /* + * FIXME: Linux assumes you have 640K of base ram.. + * this continues the error... 
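
/*
 * The branch structure of __get_smp_config() above, condensed (the
 * early-boot returns and ACPI short-circuits are omitted): a non-zero
 * feature byte 1 selects one of the MP spec's default configurations,
 * otherwise a physical pointer to a full MPC table must be present:
 */
if (mpf->mpf_feature1)
	construct_default_ISA_mptable(mpf->mpf_feature1);
else if (mpf->mpf_physptr)
	smp_read_mpc(phys_to_virt(mpf->mpf_physptr), 0);
else
	BUG();
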
+ * + * 1) Scan the bottom 1K for a signature + * 2) Scan the top 1K of base RAM + * 3) Scan the 64K of bios + */ + if (smp_scan_config(0x0, 0x400, reserve) || + smp_scan_config(639 * 0x400, 0x400, reserve) || + smp_scan_config(0xF0000, 0x10000, reserve)) + return; + /* + * If it is an SMP machine we should know now, unless the + * configuration is in an EISA/MCA bus machine with an + * extended bios data area. + * + * there is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E, calculate and scan it here. + * + * NOTE! There are Linux loaders that will corrupt the EBDA + * area, and as such this kind of SMP config may be less + * trustworthy, simply because the SMP table may have been + * stomped on during early boot. These loaders are buggy and + * should be fixed. + * + * MP1.4 SPEC states to only scan first 1K of 4K EBDA. + */ + + address = get_bios_ebda(); + if (address) + smp_scan_config(address, 0x400, reserve); +} + +void __init early_find_smp_config(void) +{ + __find_smp_config(0); +} + +void __init find_smp_config(void) +{ + __find_smp_config(1); +} + +/* -------------------------------------------------------------------------- + ACPI-based MP Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI + +#ifdef CONFIG_X86_IO_APIC + +#define MP_ISA_BUS 0 +#define MP_MAX_IOAPIC_PIN 127 + +extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; + +static int mp_find_ioapic(int gsi) +{ + int i = 0; + + /* Find the IOAPIC that manages this GSI. */ + for (i = 0; i < nr_ioapics; i++) { + if ((gsi >= mp_ioapic_routing[i].gsi_base) + && (gsi <= mp_ioapic_routing[i].gsi_end)) + return i; + } + + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); + return -1; +} + +static u8 uniq_ioapic_id(u8 id) +{ +#ifdef CONFIG_X86_32 + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && + !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + return io_apic_get_unique_id(nr_ioapics, id); + else + return id; +#else + int i; + DECLARE_BITMAP(used, 256); + bitmap_zero(used, 256); + for (i = 0; i < nr_ioapics; i++) { + struct mpc_config_ioapic *ia = &mp_ioapics[i]; + __set_bit(ia->mpc_apicid, used); + } + if (!test_bit(id, used)) + return id; + return find_first_zero_bit(used, 256); +#endif +} + +void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) +{ + int idx = 0; + + if (bad_ioapic(address)) + return; + + idx = nr_ioapics; + + mp_ioapics[idx].mpc_type = MP_IOAPIC; + mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; + mp_ioapics[idx].mpc_apicaddr = address; + + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); + mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); +#ifdef CONFIG_X86_32 + mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); +#else + mp_ioapics[idx].mpc_apicver = 0; +#endif + /* + * Build basic GSI lookup table to facilitate gsi->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 
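
/*
 * Where __find_smp_config() above looks, spelled out: the bottom
 * kilobyte, the last kilobyte of 640K base memory, the 64K BIOS ROM
 * area, and finally the first 1K of the EBDA.  get_bios_ebda() reads
 * the real-mode segment word at 0x40E and shifts it left by 4, so it
 * already returns a physical address (0 if absent):
 */
smp_scan_config(0x0, 0x400, reserve);		/* bottom 1K */
smp_scan_config(639 * 0x400, 0x400, reserve);	/* top of base RAM */
smp_scan_config(0xF0000, 0x10000, reserve);	/* BIOS area */

unsigned int ebda = get_bios_ebda();
if (ebda)
	smp_scan_config(ebda, 0x400, reserve);	/* first 1K of EBDA */
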
+ */ + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; + mp_ioapic_routing[idx].gsi_base = gsi_base; + mp_ioapic_routing[idx].gsi_end = gsi_base + + io_apic_get_redir_entries(idx); + + printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " + "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); + + nr_ioapics++; +} + +void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) +{ + struct mpc_config_intsrc intsrc; + int ioapic = -1; + int pin = -1; + + /* + * Convert 'gsi' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) + return; + pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ((bus_irq == 0) && (trigger == 3)) + trigger = 1; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_irqflag = (trigger << 2) | polarity; + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); +} + +int es7000_plat; + +void __init mp_config_acpi_legacy_irqs(void) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int ioapic = -1; + +#if defined (CONFIG_MCA) || defined (CONFIG_EISA) + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; +#endif + set_bit(MP_ISA_BUS, mp_bus_not_pci); + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); + + /* + * Older generations of ES7000 have no legacy identity mappings + */ + if (es7000_plat == 1) + return; + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if (ioapic < 0) + return; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_srcbus = MP_ISA_BUS; +#ifdef CONFIG_X86_IO_APIC + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; +#endif + /* + * Use the default configuration for the IRQs 0-15. Unless + * overridden by (MADT) interrupt source override entries. + */ + for (i = 0; i < 16; i++) { + int idx; + + for (idx = 0; idx < mp_irq_entries; idx++) { + struct mpc_config_intsrc *irq = mp_irqs + idx; + + /* Do we already have a mapping for this ISA IRQ? 
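
/*
 * Composing mpc_irqflag from an ACPI override, the inverse of the
 * decode noted earlier: trigger in bits 2-3, polarity in bits 0-1.
 * A level-triggered, active-low ISA override therefore encodes as
 * (3 << 2) | 3 == 0xf:
 */
intsrc.mpc_irqflag = (trigger << 2) | polarity;
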
*/ + if (irq->mpc_srcbus == MP_ISA_BUS + && irq->mpc_srcbusirq == i) + break; + + /* Do we already have a mapping for this IOAPIC pin */ + if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && + (irq->mpc_dstirq == i)) + break; + } + + if (idx != mp_irq_entries) { + printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); + continue; /* IRQ already used */ + } + + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_srcbusirq = i; /* Identity mapped */ + intsrc.mpc_dstirq = i; + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " + "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, + intsrc.mpc_dstirq); + + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } +} + +int mp_register_gsi(u32 gsi, int triggering, int polarity) +{ + int ioapic = -1; + int ioapic_pin = 0; + int idx, bit = 0; +#ifdef CONFIG_X86_32 +#define MAX_GSI_NUM 4096 +#define IRQ_COMPRESSION_START 64 + + static int pci_irq = IRQ_COMPRESSION_START; + /* + * Mapping between Global System Interrupts, which + * represent all possible interrupts, and IRQs + * assigned to actual devices. + */ + static int gsi_to_irq[MAX_GSI_NUM]; +#else + + if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) + return gsi; +#endif + + /* Don't set up the ACPI SCI because it's already set up */ + if (acpi_gbl_FADT.sci_interrupt == gsi) + return gsi; + + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) { + printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); + return gsi; + } + + ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; + +#ifdef CONFIG_X86_32 + if (ioapic_renumber_irq) + gsi = ioapic_renumber_irq(ioapic, gsi); +#endif + + /* + * Avoid pin reprogramming. PRTs typically include entries + * with redundant pin->gsi mappings (but unique PCI devices); + * we only program the IOAPIC on the first. + */ + bit = ioapic_pin % 32; + idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); + if (idx > 3) { + printk(KERN_ERR "Invalid reference to IOAPIC pin " + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, + ioapic_pin); + return gsi; + } + if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { + Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", + mp_ioapic_routing[ioapic].apic_id, ioapic_pin); +#ifdef CONFIG_X86_32 + return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); +#else + return gsi; +#endif + } + + mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); +#ifdef CONFIG_X86_32 + /* + * For GSI >= 64, use IRQ compression + */ + if ((gsi >= IRQ_COMPRESSION_START) + && (triggering == ACPI_LEVEL_SENSITIVE)) { + /* + * For PCI devices assign IRQs in order, avoiding gaps + * due to unused I/O APIC pins. + */ + int irq = gsi; + if (gsi < MAX_GSI_NUM) { + /* + * Retain the VIA chipset work-around (gsi > 15), but + * avoid a problem where the 8254 timer (IRQ0) is setup + * via an override (so it's not on pin 0 of the ioapic), + * and at the same time, the pin 0 interrupt is a PCI + * type. The gsi > 15 test could cause these two pins + * to be shared as IRQ0, and they are not shareable. + * So test for this condition, and if necessary, avoid + * the pin collision. 
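
/*
 * At the IOAPIC programming boundary, ACPI's edge/level and high/low
 * constants are flattened to the 0/1 flags io_apic_set_pci_routing()
 * takes, as in the call that follows; edge_level and active_low are
 * illustrative local names, not from the patch:
 */
int edge_level = (triggering == ACPI_EDGE_SENSITIVE) ? 0 : 1;
int active_low = (polarity == ACPI_ACTIVE_HIGH) ? 0 : 1;

io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, edge_level, active_low);
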
+ */ + gsi = pci_irq++; + /* + * Don't assign IRQ used by ACPI SCI + */ + if (gsi == acpi_gbl_FADT.sci_interrupt) + gsi = pci_irq++; + gsi_to_irq[irq] = gsi; + } else { + printk(KERN_ERR "GSI %u is too high\n", gsi); + return gsi; + } + } +#endif + io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, + triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, + polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + return gsi; +} + +#endif /* CONFIG_X86_IO_APIC */ +#endif /* CONFIG_ACPI */ diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c deleted file mode 100644 index 6376791cffe5..000000000000 --- a/arch/x86/kernel/mpparse_32.c +++ /dev/null @@ -1,1102 +0,0 @@ -/* - * Intel Multiprocessor Specification 1.1 and 1.4 - * compliant MP-table parsing routines. - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar - * - * Fixes - * Erich Boleyn : MP v1.4 and additional changes. - * Alan Cox : Added EBDA scanning - * Ingo Molnar : various cleanups and rewrites - * Maciej W. Rozycki: Bits for default MP configurations - * Paul Diefenbaugh: Added full ACPI support - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* Have we found an MP table */ -int smp_found_config; - -/* - * Various Linux-internal data structures created from the - * MP-table. - */ -#if defined (CONFIG_MCA) || defined (CONFIG_EISA) -int mp_bus_id_to_type[MAX_MP_BUSSES]; -#endif -DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; -static int mp_current_pci_id; - -int pic_mode; - -/* - * Intel MP BIOS table parsing routines: - */ - -/* - * Checksum an MP configuration block. - */ - -static int __init mpf_checksum(unsigned char *mp, int len) -{ - int sum = 0; - - while (len--) - sum += *mp++; - - return sum & 0xFF; -} - -#ifdef CONFIG_X86_NUMAQ -/* - * Have to match translation table entries to main table entries by counter - * hence the mpc_record variable .... can't see a less disgusting way of - * doing this .... - */ - -static int mpc_record; -static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] - __cpuinitdata; -#endif - -static void __cpuinit MP_processor_info(struct mpc_config_processor *m) -{ - int apicid; - char *bootup_cpu = ""; - - if (!(m->mpc_cpuflag & CPU_ENABLED)) { - disabled_cpus++; - return; - } -#ifdef CONFIG_X86_NUMAQ - apicid = mpc_apic_id(m, translation_table[mpc_record]); -#else - apicid = m->mpc_apicid; -#endif - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - bootup_cpu = " (Bootup-CPU)"; - boot_cpu_physical_apicid = m->mpc_apicid; - } - - printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); - generic_processor_info(apicid, m->mpc_apicver); -} - -static void __init MP_bus_info(struct mpc_config_bus *m) -{ - char str[7]; - - memcpy(str, m->mpc_bustype, 6); - str[6] = 0; - -#ifdef CONFIG_X86_NUMAQ - mpc_oem_bus_info(m, str, translation_table[mpc_record]); -#else - Dprintk("Bus #%d is %s\n", m->mpc_busid, str); -#endif - -#if MAX_MP_BUSSES < 256 - if (m->mpc_busid >= MAX_MP_BUSSES) { - printk(KERN_WARNING "MP table busid value (%d) for bustype %s " - " is too large, max. 
supported is %d\n", - m->mpc_busid, str, MAX_MP_BUSSES - 1); - return; - } -#endif - - if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - set_bit(m->mpc_busid, mp_bus_not_pci); -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; -#endif - } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { -#ifdef CONFIG_X86_NUMAQ - mpc_oem_pci_bus(m, translation_table[mpc_record]); -#endif - clear_bit(m->mpc_busid, mp_bus_not_pci); - mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; - mp_current_pci_id++; -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; - } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; - } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; -#endif - } else - printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); -} - -#ifdef CONFIG_X86_IO_APIC - -static int bad_ioapic(unsigned long address) -{ - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " - "(found %d)\n", MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS!\n"); - } - if (!address) { - printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" - " found in table, skipping!\n"); - return 1; - } - return 0; -} - -static void __init MP_ioapic_info(struct mpc_config_ioapic *m) -{ - if (!(m->mpc_flags & MPC_APIC_USABLE)) - return; - - printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", - m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); - - if (bad_ioapic(m->mpc_apicaddr)) - return; - - mp_ioapics[nr_ioapics] = *m; - nr_ioapics++; -} - -static void __init MP_intsrc_info(struct mpc_config_intsrc *m) -{ - mp_irqs[mp_irq_entries] = *m; - Dprintk("Int: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!!\n"); -} - -#endif - -static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) -{ - Dprintk("Lint: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); -} - -#ifdef CONFIG_X86_NUMAQ -static void __init MP_translation_info(struct mpc_config_translation *m) -{ - printk(KERN_INFO - "Translation: record %d, type %d, quad %d, global %d, local %d\n", - mpc_record, m->trans_type, m->trans_quad, m->trans_global, - m->trans_local); - - if (mpc_record >= MAX_MPC_ENTRY) - printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); - else - translation_table[mpc_record] = m; /* stash this for later */ - if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) - node_set_online(m->trans_quad); -} - -/* - * Read/parse the MPC oem tables - */ - -static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, - unsigned short oemsize) -{ - int count = sizeof(*oemtable); /* the header size */ - unsigned char *oemptr = ((unsigned char *)oemtable) + count; - - mpc_record = 0; - printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... 
\n", - oemtable); - if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { - printk(KERN_WARNING - "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", - oemtable->oem_signature[0], oemtable->oem_signature[1], - oemtable->oem_signature[2], oemtable->oem_signature[3]); - return; - } - if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { - printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); - return; - } - while (count < oemtable->oem_length) { - switch (*oemptr) { - case MP_TRANSLATION: - { - struct mpc_config_translation *m = - (struct mpc_config_translation *)oemptr; - MP_translation_info(m); - oemptr += sizeof(*m); - count += sizeof(*m); - ++mpc_record; - break; - } - default: - { - printk(KERN_WARNING - "Unrecognised OEM table entry type! - %d\n", - (int)*oemptr); - return; - } - } - } -} - -static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) -{ - if (strncmp(oem, "IBM NUMA", 8)) - printk("Warning! May not be a NUMA-Q system!\n"); - if (mpc->mpc_oemptr) - smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr, - mpc->mpc_oemsize); -} -#endif /* CONFIG_X86_NUMAQ */ - -/* - * Read/parse the MPC - */ - -static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) -{ - char str[16]; - char oem[10]; - int count = sizeof(*mpc); - unsigned char *mpt = ((unsigned char *)mpc) + count; - - if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { - printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", - mpc->mpc_signature[0], mpc->mpc_signature[1], - mpc->mpc_signature[2], mpc->mpc_signature[3]); - return 0; - } - if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { - printk(KERN_ERR "MPTABLE: checksum error!\n"); - return 0; - } - if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { - printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", - mpc->mpc_spec); - return 0; - } - if (!mpc->mpc_lapic) { - printk(KERN_ERR "MPTABLE: null local APIC address!\n"); - return 0; - } - memcpy(oem, mpc->mpc_oem, 8); - oem[8] = 0; - printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); - - memcpy(str, mpc->mpc_productid, 12); - str[12] = 0; - printk("Product ID: %s ", str); - -#ifdef CONFIG_X86_32 - mps_oem_check(mpc, oem, str); -#endif - printk(KERN_INFO "MPTABLE: Product ID: %s ", str); - - printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); - - /* save the local APIC address, it might be non-default */ - if (!acpi_lapic) - mp_lapic_addr = mpc->mpc_lapic; - - if (early) - return 1; - - /* - * Now process the configuration blocks. 
- */ -#ifdef CONFIG_X86_NUMAQ - mpc_record = 0; -#endif - while (count < mpc->mpc_length) { - switch (*mpt) { - case MP_PROCESSOR: - { - struct mpc_config_processor *m = - (struct mpc_config_processor *)mpt; - /* ACPI may have already provided this data */ - if (!acpi_lapic) - MP_processor_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_BUS: - { - struct mpc_config_bus *m = - (struct mpc_config_bus *)mpt; - MP_bus_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_IOAPIC: - { -#ifdef CONFIG_X86_IO_APIC - struct mpc_config_ioapic *m = - (struct mpc_config_ioapic *)mpt; - MP_ioapic_info(m); -#endif - mpt += sizeof(struct mpc_config_ioapic); - count += sizeof(struct mpc_config_ioapic); - break; - } - case MP_INTSRC: - { -#ifdef CONFIG_X86_IO_APIC - struct mpc_config_intsrc *m = - (struct mpc_config_intsrc *)mpt; - - MP_intsrc_info(m); -#endif - mpt += sizeof(struct mpc_config_intsrc); - count += sizeof(struct mpc_config_intsrc); - break; - } - case MP_LINTSRC: - { - struct mpc_config_lintsrc *m = - (struct mpc_config_lintsrc *)mpt; - MP_lintsrc_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - default: - { - count = mpc->mpc_length; - break; - } - } -#ifdef CONFIG_X86_NUMAQ - ++mpc_record; -#endif - } - setup_apic_routing(); - if (!num_processors) - printk(KERN_ERR "MPTABLE: no processors registered!\n"); - return num_processors; -} - -#ifdef CONFIG_X86_IO_APIC - -static int __init ELCR_trigger(unsigned int irq) -{ - unsigned int port; - - port = 0x4d0 + (irq >> 3); - return (inb(port) >> (irq & 7)) & 1; -} - -static void __init construct_default_ioirq_mptable(int mpc_default_type) -{ - struct mpc_config_intsrc intsrc; - int i; - int ELCR_fallback = 0; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ - intsrc.mpc_srcbus = 0; - intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; - - intsrc.mpc_irqtype = mp_INT; - - /* - * If true, we have an ISA/PCI system with no IRQ entries - * in the MP table. To prevent the PCI interrupts from being set up - * incorrectly, we try to use the ELCR. The sanity check to see if - * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can - * never be level sensitive, so we simply see if the ELCR agrees. - * If it does, we assume it's valid. - */ - if (mpc_default_type == 5) { - printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " - "falling back to ELCR\n"); - - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || - ELCR_trigger(13)) - printk(KERN_ERR "ELCR contains invalid data... " - "not using ELCR\n"); - else { - printk(KERN_INFO - "Using ELCR to identify PCI interrupts\n"); - ELCR_fallback = 1; - } - } - - for (i = 0; i < 16; i++) { - switch (mpc_default_type) { - case 2: - if (i == 0 || i == 13) - continue; /* IRQ0 & IRQ13 not connected */ - /* fall through */ - default: - if (i == 2) - continue; /* IRQ2 is never connected */ - } - - if (ELCR_fallback) { - /* - * If the ELCR indicates a level-sensitive interrupt, we - * copy that information over to the MP table in the - * irqflag field (level sensitive, active high polarity). - */ - if (ELCR_trigger(i)) - intsrc.mpc_irqflag = 13; - else - intsrc.mpc_irqflag = 0; - } - - intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? 
i : 2; /* IRQ0 to INTIN2 */ - MP_intsrc_info(&intsrc); - } - - intsrc.mpc_irqtype = mp_ExtINT; - intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ - MP_intsrc_info(&intsrc); -} - -#endif - -static inline void __init construct_default_ISA_mptable(int mpc_default_type) -{ - struct mpc_config_processor processor; - struct mpc_config_bus bus; -#ifdef CONFIG_X86_IO_APIC - struct mpc_config_ioapic ioapic; -#endif - struct mpc_config_lintsrc lintsrc; - int linttypes[2] = { mp_ExtINT, mp_NMI }; - int i; - - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - - /* - * 2 CPUs, numbered 0 & 1. - */ - processor.mpc_type = MP_PROCESSOR; - /* Either an integrated APIC or a discrete 82489DX. */ - processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - processor.mpc_cpuflag = CPU_ENABLED; - processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; - processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; - for (i = 0; i < 2; i++) { - processor.mpc_apicid = i; - MP_processor_info(&processor); - } - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - switch (mpc_default_type) { - default: - printk(KERN_ERR "???\nUnknown standard configuration %d\n", - mpc_default_type); - /* fall through */ - case 1: - case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); - break; - case 2: - case 6: - case 3: - memcpy(bus.mpc_bustype, "EISA ", 6); - break; - case 4: - case 7: - memcpy(bus.mpc_bustype, "MCA ", 6); - } - MP_bus_info(&bus); - if (mpc_default_type > 4) { - bus.mpc_busid = 1; - memcpy(bus.mpc_bustype, "PCI ", 6); - MP_bus_info(&bus); - } - -#ifdef CONFIG_X86_IO_APIC - ioapic.mpc_type = MP_IOAPIC; - ioapic.mpc_apicid = 2; - ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - ioapic.mpc_flags = MPC_APIC_USABLE; - ioapic.mpc_apicaddr = 0xFEC00000; - MP_ioapic_info(&ioapic); - - /* - * We set up most of the low 16 IO-APIC pins according to MPS rules. - */ - construct_default_ioirq_mptable(mpc_default_type); -#endif - lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ - lintsrc.mpc_srcbusid = 0; - lintsrc.mpc_srcbusirq = 0; - lintsrc.mpc_destapic = MP_APIC_ALL; - for (i = 0; i < 2; i++) { - lintsrc.mpc_irqtype = linttypes[i]; - lintsrc.mpc_destapiclint = i; - MP_lintsrc_info(&lintsrc); - } -} - -static struct intel_mp_floating *mpf_found; - -/* - * Scan the memory blocks for an SMP configuration block. - */ -static void __init __get_smp_config(unsigned early) -{ - struct intel_mp_floating *mpf = mpf_found; - - if (acpi_lapic && early) - return; - - /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical - * processors, where MPS only supports physical. - */ - if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " - "information\n"); - return; - } else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) " - "configuration information\n"); - - printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", - mpf->mpf_specification); -#ifdef CONFIG_X86_32 - if (mpf->mpf_feature2 & (1 << 7)) { - printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); - pic_mode = 1; - } else { - printk(KERN_INFO " Virtual Wire compatibility mode.\n"); - pic_mode = 0; - } -#endif - /* - * Now see if we need to read further. 
- */ - if (mpf->mpf_feature1 != 0) { - if (early) { - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - return; - } - - printk(KERN_INFO "Default MP configuration #%d\n", - mpf->mpf_feature1); - construct_default_ISA_mptable(mpf->mpf_feature1); - - } else if (mpf->mpf_physptr) { - - /* - * Read the physical hardware table. Anything here will - * override the defaults. - */ - if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { - smp_found_config = 0; - printk(KERN_ERR - "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. " - "(tell your hw vendor)\n"); - return; - } - - if (early) - return; -#ifdef CONFIG_X86_IO_APIC - /* - * If there are no explicit MP IRQ entries, then we are - * broken. We set up most of the low 16 IO-APIC pins to - * ISA defaults and hope it will work. - */ - if (!mp_irq_entries) { - struct mpc_config_bus bus; - - printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " - "using default mptable. " - "(tell your hw vendor)\n"); - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - memcpy(bus.mpc_bustype, "ISA ", 6); - MP_bus_info(&bus); - - construct_default_ioirq_mptable(0); - } -#endif - } else - BUG(); - - if (!early) - printk(KERN_INFO "Processors: %d\n", num_processors); - /* - * Only use the first configuration found. - */ -} - -void __init early_get_smp_config(void) -{ - __get_smp_config(1); -} - -void __init get_smp_config(void) -{ - __get_smp_config(0); -} - -static int __init smp_scan_config(unsigned long base, unsigned long length, - unsigned reserve) -{ - extern void __bad_mpf_size(void); - unsigned int *bp = phys_to_virt(base); - struct intel_mp_floating *mpf; - - Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); - if (sizeof(*mpf) != 16) - __bad_mpf_size(); - - while (length > 0) { - mpf = (struct intel_mp_floating *)bp; - if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && - !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4))) { - - smp_found_config = 1; - mpf_found = mpf; -#ifdef CONFIG_X86_32 - printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", - mpf, virt_to_phys(mpf)); - reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, - BOOTMEM_DEFAULT); - if (mpf->mpf_physptr) { - /* - * We cannot access to MPC table to compute - * table size yet, as only few megabytes from - * the bottom is mapped now. - * PC-9800's MPC table places on the very last - * of physical memory; so that simply reserving - * PAGE_SIZE from mpg->mpf_physptr yields BUG() - * in reserve_bootmem. - */ - unsigned long size = PAGE_SIZE; - unsigned long end = max_low_pfn * PAGE_SIZE; - if (mpf->mpf_physptr + size > end) - size = end - mpf->mpf_physptr; - reserve_bootmem(mpf->mpf_physptr, size, - BOOTMEM_DEFAULT); - } - -#else - if (!reserve) - return 1; - - reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); - if (mpf->mpf_physptr) - reserve_bootmem_generic(mpf->mpf_physptr, - PAGE_SIZE); -#endif - return 1; - } - bp += 4; - length -= 16; - } - return 0; -} - -static void __init __find_smp_config(unsigned reserve) -{ - unsigned int address; - - /* - * FIXME: Linux assumes you have 640K of base ram.. - * this continues the error... 
- * - * 1) Scan the bottom 1K for a signature - * 2) Scan the top 1K of base RAM - * 3) Scan the 64K of bios - */ - if (smp_scan_config(0x0, 0x400, reserve) || - smp_scan_config(639 * 0x400, 0x400, reserve) || - smp_scan_config(0xF0000, 0x10000, reserve)) - return; - /* - * If it is an SMP machine we should know now, unless the - * configuration is in an EISA/MCA bus machine with an - * extended bios data area. - * - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E, calculate and scan it here. - * - * NOTE! There are Linux loaders that will corrupt the EBDA - * area, and as such this kind of SMP config may be less - * trustworthy, simply because the SMP table may have been - * stomped on during early boot. These loaders are buggy and - * should be fixed. - * - * MP1.4 SPEC states to only scan first 1K of 4K EBDA. - */ - - address = get_bios_ebda(); - if (address) - smp_scan_config(address, 0x400, reserve); -} - -void __init early_find_smp_config(void) -{ - __find_smp_config(0); -} - -void __init find_smp_config(void) -{ - __find_smp_config(1); -} - -/* -------------------------------------------------------------------------- - ACPI-based MP Configuration - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI - -#ifdef CONFIG_X86_IO_APIC - -#define MP_ISA_BUS 0 -#define MP_MAX_IOAPIC_PIN 127 - -extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; - -static int mp_find_ioapic(int gsi) -{ - int i = 0; - - /* Find the IOAPIC that manages this GSI. */ - for (i = 0; i < nr_ioapics; i++) { - if ((gsi >= mp_ioapic_routing[i].gsi_base) - && (gsi <= mp_ioapic_routing[i].gsi_end)) - return i; - } - - printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); - return -1; -} - -static u8 uniq_ioapic_id(u8 id) -{ -#ifdef CONFIG_X86_32 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return io_apic_get_unique_id(nr_ioapics, id); - else - return id; -#else - int i; - DECLARE_BITMAP(used, 256); - bitmap_zero(used, 256); - for (i = 0; i < nr_ioapics; i++) { - struct mpc_config_ioapic *ia = &mp_ioapics[i]; - __set_bit(ia->mpc_apicid, used); - } - if (!test_bit(id, used)) - return id; - return find_first_zero_bit(used, 256); -#endif -} - -void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) -{ - int idx = 0; - - if (bad_ioapic(address)) - return; - - idx = nr_ioapics; - - mp_ioapics[idx].mpc_type = MP_IOAPIC; - mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; - mp_ioapics[idx].mpc_apicaddr = address; - - set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); -#ifdef CONFIG_X86_32 - mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); -#else - mp_ioapics[idx].mpc_apicver = 0; -#endif - /* - * Build basic GSI lookup table to facilitate gsi->io_apic lookups - * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 
- */ - mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; - mp_ioapic_routing[idx].gsi_base = gsi_base; - mp_ioapic_routing[idx].gsi_end = gsi_base + - io_apic_get_redir_entries(idx); - - printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " - "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); - - nr_ioapics++; -} - -void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) -{ - struct mpc_config_intsrc intsrc; - int ioapic = -1; - int pin = -1; - - /* - * Convert 'gsi' to 'ioapic.pin'. - */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - - /* - * TBD: This check is for faulty timer entries, where the override - * erroneously sets the trigger to level, resulting in a HUGE - * increase of timer interrupts! - */ - if ((bus_irq == 0) && (trigger == 3)) - trigger = 1; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_irqflag = (trigger << 2) | polarity; - intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ - intsrc.mpc_dstirq = pin; /* INTIN# */ - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", - intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!\n"); -} - -int es7000_plat; - -void __init mp_config_acpi_legacy_irqs(void) -{ - struct mpc_config_intsrc intsrc; - int i = 0; - int ioapic = -1; - -#if defined (CONFIG_MCA) || defined (CONFIG_EISA) - /* - * Fabricate the legacy ISA bus (bus #31). - */ - mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; -#endif - set_bit(MP_ISA_BUS, mp_bus_not_pci); - Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); - - /* - * Older generations of ES7000 have no legacy identity mappings - */ - if (es7000_plat == 1) - return; - - /* - * Locate the IOAPIC that manages the ISA IRQs (0-15). - */ - ioapic = mp_find_ioapic(0); - if (ioapic < 0) - return; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* Conforming */ - intsrc.mpc_srcbus = MP_ISA_BUS; -#ifdef CONFIG_X86_IO_APIC - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; -#endif - /* - * Use the default configuration for the IRQs 0-15. Unless - * overridden by (MADT) interrupt source override entries. - */ - for (i = 0; i < 16; i++) { - int idx; - - for (idx = 0; idx < mp_irq_entries; idx++) { - struct mpc_config_intsrc *irq = mp_irqs + idx; - - /* Do we already have a mapping for this ISA IRQ? 
*/ - if (irq->mpc_srcbus == MP_ISA_BUS - && irq->mpc_srcbusirq == i) - break; - - /* Do we already have a mapping for this IOAPIC pin */ - if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && - (irq->mpc_dstirq == i)) - break; - } - - if (idx != mp_irq_entries) { - printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); - continue; /* IRQ already used */ - } - - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_srcbusirq = i; /* Identity mapped */ - intsrc.mpc_dstirq = i; - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " - "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, - intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!\n"); - } -} - -int mp_register_gsi(u32 gsi, int triggering, int polarity) -{ - int ioapic = -1; - int ioapic_pin = 0; - int idx, bit = 0; -#ifdef CONFIG_X86_32 -#define MAX_GSI_NUM 4096 -#define IRQ_COMPRESSION_START 64 - - static int pci_irq = IRQ_COMPRESSION_START; - /* - * Mapping between Global System Interrupts, which - * represent all possible interrupts, and IRQs - * assigned to actual devices. - */ - static int gsi_to_irq[MAX_GSI_NUM]; -#else - - if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) - return gsi; -#endif - - /* Don't set up the ACPI SCI because it's already set up */ - if (acpi_gbl_FADT.sci_interrupt == gsi) - return gsi; - - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) { - printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); - return gsi; - } - - ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - -#ifdef CONFIG_X86_32 - if (ioapic_renumber_irq) - gsi = ioapic_renumber_irq(ioapic, gsi); -#endif - - /* - * Avoid pin reprogramming. PRTs typically include entries - * with redundant pin->gsi mappings (but unique PCI devices); - * we only program the IOAPIC on the first. - */ - bit = ioapic_pin % 32; - idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); - if (idx > 3) { - printk(KERN_ERR "Invalid reference to IOAPIC pin " - "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, - ioapic_pin); - return gsi; - } - if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { - Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", - mp_ioapic_routing[ioapic].apic_id, ioapic_pin); -#ifdef CONFIG_X86_32 - return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); -#else - return gsi; -#endif - } - - mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); -#ifdef CONFIG_X86_32 - /* - * For GSI >= 64, use IRQ compression - */ - if ((gsi >= IRQ_COMPRESSION_START) - && (triggering == ACPI_LEVEL_SENSITIVE)) { - /* - * For PCI devices assign IRQs in order, avoiding gaps - * due to unused I/O APIC pins. - */ - int irq = gsi; - if (gsi < MAX_GSI_NUM) { - /* - * Retain the VIA chipset work-around (gsi > 15), but - * avoid a problem where the 8254 timer (IRQ0) is setup - * via an override (so it's not on pin 0 of the ioapic), - * and at the same time, the pin 0 interrupt is a PCI - * type. The gsi > 15 test could cause these two pins - * to be shared as IRQ0, and they are not shareable. - * So test for this condition, and if necessary, avoid - * the pin collision. 
- */ - gsi = pci_irq++; - /* - * Don't assign IRQ used by ACPI SCI - */ - if (gsi == acpi_gbl_FADT.sci_interrupt) - gsi = pci_irq++; - gsi_to_irq[irq] = gsi; - } else { - printk(KERN_ERR "GSI %u is too high\n", gsi); - return gsi; - } - } -#endif - io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, - triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, - polarity == ACPI_ACTIVE_HIGH ? 0 : 1); - return gsi; -} - -#endif /* CONFIG_X86_IO_APIC */ -#endif /* CONFIG_ACPI */ diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c deleted file mode 100644 index 7d742577d68a..000000000000 --- a/arch/x86/kernel/mpparse_64.c +++ /dev/null @@ -1,991 +0,0 @@ -/* - * Intel Multiprocessor Specification 1.1 and 1.4 - * compliant MP-table parsing routines. - * - * (c) 1995 Alan Cox, Building #3 - * (c) 1998, 1999, 2000 Ingo Molnar - * - * Fixes - * Erich Boleyn : MP v1.4 and additional changes. - * Alan Cox : Added EBDA scanning - * Ingo Molnar : various cleanups and rewrites - * Maciej W. Rozycki: Bits for default MP configurations - * Paul Diefenbaugh: Added full ACPI support - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* Have we found an MP table */ -int smp_found_config; - -/* - * Various Linux-internal data structures created from the - * MP-table. - */ -DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); -int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; - -static int mp_current_pci_id = 0; - -/* - * Intel MP BIOS table parsing routines: - */ - -/* - * Checksum an MP configuration block. - */ - -static int __init mpf_checksum(unsigned char *mp, int len) -{ - int sum = 0; - - while (len--) - sum += *mp++; - - return sum & 0xFF; -} - -static void __cpuinit MP_processor_info(struct mpc_config_processor *m) -{ - int apicid; - char *bootup_cpu = ""; - - if (!(m->mpc_cpuflag & CPU_ENABLED)) { - disabled_cpus++; - return; - } -#ifdef CONFIG_X86_NUMAQ - apicid = mpc_apic_id(m, translation_table[mpc_record]); -#else - apicid = m->mpc_apicid; -#endif - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { - bootup_cpu = " (Bootup-CPU)"; - boot_cpu_physical_apicid = m->mpc_apicid; - } - - printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); - generic_processor_info(apicid, m->mpc_apicver); -} - -static void __init MP_bus_info(struct mpc_config_bus *m) -{ - char str[7]; - - memcpy(str, m->mpc_bustype, 6); - str[6] = 0; - -#ifdef CONFIG_X86_NUMAQ - mpc_oem_bus_info(m, str, translation_table[mpc_record]); -#else - Dprintk("Bus #%d is %s\n", m->mpc_busid, str); -#endif - -#if MAX_MP_BUSSES < 256 - if (m->mpc_busid >= MAX_MP_BUSSES) { - printk(KERN_WARNING "MP table busid value (%d) for bustype %s " - " is too large, max. 
supported is %d\n", - m->mpc_busid, str, MAX_MP_BUSSES - 1); - return; - } -#endif - - if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - set_bit(m->mpc_busid, mp_bus_not_pci); -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; -#endif - } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { -#ifdef CONFIG_X86_NUMAQ - mpc_oem_pci_bus(m, translation_table[mpc_record]); -#endif - clear_bit(m->mpc_busid, mp_bus_not_pci); - mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; - mp_current_pci_id++; -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; - } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; - } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; -#endif - } else - printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); -} - -static int bad_ioapic(unsigned long address) -{ - if (nr_ioapics >= MAX_IO_APICS) { - printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " - "(found %d)\n", MAX_IO_APICS, nr_ioapics); - panic("Recompile kernel with bigger MAX_IO_APICS!\n"); - } - if (!address) { - printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" - " found in table, skipping!\n"); - return 1; - } - return 0; -} - -static void __init MP_ioapic_info(struct mpc_config_ioapic *m) -{ - if (!(m->mpc_flags & MPC_APIC_USABLE)) - return; - - printk(KERN_INFO "I/O APIC #%d at 0x%X.\n", m->mpc_apicid, - m->mpc_apicaddr); - - if (bad_ioapic(m->mpc_apicaddr)) - return; - - mp_ioapics[nr_ioapics] = *m; - nr_ioapics++; -} - -static void __init MP_intsrc_info(struct mpc_config_intsrc *m) -{ - mp_irqs[mp_irq_entries] = *m; - Dprintk("Int: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); - if (++mp_irq_entries >= MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!!\n"); -} - -static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) -{ - Dprintk("Lint: type %d, pol %d, trig %d, bus %d," - " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); -} - -/* - * Read/parse the MPC - */ -static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) -{ - char str[16]; - char oem[10]; - int count = sizeof(*mpc); - unsigned char *mpt = ((unsigned char *)mpc) + count; - - if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { - printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", - mpc->mpc_signature[0], mpc->mpc_signature[1], - mpc->mpc_signature[2], mpc->mpc_signature[3]); - return 0; - } - if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { - printk(KERN_ERR "MPTABLE: checksum error!\n"); - return 0; - } - if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { - printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", - mpc->mpc_spec); - return 0; - } - if (!mpc->mpc_lapic) { - printk(KERN_ERR "MPTABLE: null local APIC address!\n"); - return 0; - } - memcpy(oem, mpc->mpc_oem, 8); - oem[8] = 0; - printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); - - memcpy(str, mpc->mpc_productid, 12); - str[12] = 0; - printk("Product ID: %s ", str); - -#ifdef CONFIG_X86_32 - mps_oem_check(mpc, oem, str); -#endif - printk(KERN_INFO "MPTABLE: Product 
ID: %s ", str); - - printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); - - /* save the local APIC address, it might be non-default */ - if (!acpi_lapic) - mp_lapic_addr = mpc->mpc_lapic; - - if (early) - return 1; - - /* - * Now process the configuration blocks. - */ -#ifdef CONFIG_X86_NUMAQ - mpc_record = 0; -#endif - while (count < mpc->mpc_length) { - switch (*mpt) { - case MP_PROCESSOR: - { - struct mpc_config_processor *m = - (struct mpc_config_processor *)mpt; - /* ACPI may have already provided this data */ - if (!acpi_lapic) - MP_processor_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_BUS: - { - struct mpc_config_bus *m = - (struct mpc_config_bus *)mpt; - MP_bus_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_IOAPIC: - { - struct mpc_config_ioapic *m = - (struct mpc_config_ioapic *)mpt; - MP_ioapic_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_INTSRC: - { - struct mpc_config_intsrc *m = - (struct mpc_config_intsrc *)mpt; - - MP_intsrc_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - case MP_LINTSRC: - { - struct mpc_config_lintsrc *m = - (struct mpc_config_lintsrc *)mpt; - MP_lintsrc_info(m); - mpt += sizeof(*m); - count += sizeof(*m); - break; - } - default: - { - count = mpc->mpc_length; - break; - } - } -#ifdef CONFIG_X86_NUMAQ - ++mpc_record; -#endif - } - setup_apic_routing(); - if (!num_processors) - printk(KERN_ERR "MPTABLE: no processors registered!\n"); - return num_processors; -} - -static int __init ELCR_trigger(unsigned int irq) -{ - unsigned int port; - - port = 0x4d0 + (irq >> 3); - return (inb(port) >> (irq & 7)) & 1; -} - -static void __init construct_default_ioirq_mptable(int mpc_default_type) -{ - struct mpc_config_intsrc intsrc; - int i; - int ELCR_fallback = 0; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ - intsrc.mpc_srcbus = 0; - intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; - - intsrc.mpc_irqtype = mp_INT; - - /* - * If true, we have an ISA/PCI system with no IRQ entries - * in the MP table. To prevent the PCI interrupts from being set up - * incorrectly, we try to use the ELCR. The sanity check to see if - * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can - * never be level sensitive, so we simply see if the ELCR agrees. - * If it does, we assume it's valid. - */ - if (mpc_default_type == 5) { - printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " - "falling back to ELCR\n"); - - if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || - ELCR_trigger(13)) - printk(KERN_ERR "ELCR contains invalid data... " - "not using ELCR\n"); - else { - printk(KERN_INFO - "Using ELCR to identify PCI interrupts\n"); - ELCR_fallback = 1; - } - } - - for (i = 0; i < 16; i++) { - switch (mpc_default_type) { - case 2: - if (i == 0 || i == 13) - continue; /* IRQ0 & IRQ13 not connected */ - /* fall through */ - default: - if (i == 2) - continue; /* IRQ2 is never connected */ - } - - if (ELCR_fallback) { - /* - * If the ELCR indicates a level-sensitive interrupt, we - * copy that information over to the MP table in the - * irqflag field (level sensitive, active high polarity). - */ - if (ELCR_trigger(i)) - intsrc.mpc_irqflag = 13; - else - intsrc.mpc_irqflag = 0; - } - - intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? 
i : 2; /* IRQ0 to INTIN2 */ - MP_intsrc_info(&intsrc); - } - - intsrc.mpc_irqtype = mp_ExtINT; - intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ - MP_intsrc_info(&intsrc); -} - -static inline void __init construct_default_ISA_mptable(int mpc_default_type) -{ - struct mpc_config_processor processor; - struct mpc_config_bus bus; - struct mpc_config_ioapic ioapic; - struct mpc_config_lintsrc lintsrc; - int linttypes[2] = { mp_ExtINT, mp_NMI }; - int i; - - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - - /* - * 2 CPUs, numbered 0 & 1. - */ - processor.mpc_type = MP_PROCESSOR; - /* Either an integrated APIC or a discrete 82489DX. */ - processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - processor.mpc_cpuflag = CPU_ENABLED; - processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; - processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; - for (i = 0; i < 2; i++) { - processor.mpc_apicid = i; - MP_processor_info(&processor); - } - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - switch (mpc_default_type) { - default: - printk(KERN_ERR "???\nUnknown standard configuration %d\n", - mpc_default_type); - /* fall through */ - case 1: - case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); - break; - case 2: - case 6: - case 3: - memcpy(bus.mpc_bustype, "EISA ", 6); - break; - case 4: - case 7: - memcpy(bus.mpc_bustype, "MCA ", 6); - } - MP_bus_info(&bus); - if (mpc_default_type > 4) { - bus.mpc_busid = 1; - memcpy(bus.mpc_bustype, "PCI ", 6); - MP_bus_info(&bus); - } - - ioapic.mpc_type = MP_IOAPIC; - ioapic.mpc_apicid = 2; - ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - ioapic.mpc_flags = MPC_APIC_USABLE; - ioapic.mpc_apicaddr = 0xFEC00000; - MP_ioapic_info(&ioapic); - - /* - * We set up most of the low 16 IO-APIC pins according to MPS rules. - */ - construct_default_ioirq_mptable(mpc_default_type); - - lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ - lintsrc.mpc_srcbusid = 0; - lintsrc.mpc_srcbusirq = 0; - lintsrc.mpc_destapic = MP_APIC_ALL; - for (i = 0; i < 2; i++) { - lintsrc.mpc_irqtype = linttypes[i]; - lintsrc.mpc_destapiclint = i; - MP_lintsrc_info(&lintsrc); - } -} - -static struct intel_mp_floating *mpf_found; - -/* - * Scan the memory blocks for an SMP configuration block. - */ -static void __init __get_smp_config(unsigned early) -{ - struct intel_mp_floating *mpf = mpf_found; - - if (acpi_lapic && early) - return; - /* - * ACPI supports both logical (e.g. Hyper-Threading) and physical - * processors, where MPS only supports physical. - */ - if (acpi_lapic && acpi_ioapic) { - printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " - "information\n"); - return; - } else if (acpi_lapic) - printk(KERN_INFO "Using ACPI for processor (LAPIC) " - "configuration information\n"); - - printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", - mpf->mpf_specification); -#ifdef CONFIG_X86_32 - if (mpf->mpf_feature2 & (1 << 7)) { - printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); - pic_mode = 1; - } else { - printk(KERN_INFO " Virtual Wire compatibility mode.\n"); - pic_mode = 0; - } -#endif - /* - * Now see if we need to read further. 
- */ - if (mpf->mpf_feature1 != 0) { - if (early) { - /* - * local APIC has default address - */ - mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - return; - } - - printk(KERN_INFO "Default MP configuration #%d\n", - mpf->mpf_feature1); - construct_default_ISA_mptable(mpf->mpf_feature1); - - } else if (mpf->mpf_physptr) { - - /* - * Read the physical hardware table. Anything here will - * override the defaults. - */ - if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { - smp_found_config = 0; - printk(KERN_ERR - "BIOS bug, MP table errors detected!...\n"); - printk(KERN_ERR "... disabling SMP support. " - "(tell your hw vendor)\n"); - return; - } - - if (early) - return; - /* - * If there are no explicit MP IRQ entries, then we are - * broken. We set up most of the low 16 IO-APIC pins to - * ISA defaults and hope it will work. - */ - if (!mp_irq_entries) { - struct mpc_config_bus bus; - - printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " - "using default mptable. " - "(tell your hw vendor)\n"); - - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - memcpy(bus.mpc_bustype, "ISA ", 6); - MP_bus_info(&bus); - - construct_default_ioirq_mptable(0); - } - - } else - BUG(); - - if (!early) - printk(KERN_INFO "Processors: %d\n", num_processors); - /* - * Only use the first configuration found. - */ -} - -void __init early_get_smp_config(void) -{ - __get_smp_config(1); -} - -void __init get_smp_config(void) -{ - __get_smp_config(0); -} - -static int __init smp_scan_config(unsigned long base, unsigned long length, - unsigned reserve) -{ - extern void __bad_mpf_size(void); - unsigned int *bp = phys_to_virt(base); - struct intel_mp_floating *mpf; - - Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); - if (sizeof(*mpf) != 16) - __bad_mpf_size(); - - while (length > 0) { - mpf = (struct intel_mp_floating *)bp; - if ((*bp == SMP_MAGIC_IDENT) && - (mpf->mpf_length == 1) && - !mpf_checksum((unsigned char *)bp, 16) && - ((mpf->mpf_specification == 1) - || (mpf->mpf_specification == 4))) { - - smp_found_config = 1; - mpf_found = mpf; -#ifdef CONFIG_X86_32 - printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", - mpf, virt_to_phys(mpf)); - reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, - BOOTMEM_DEFAULT); - if (mpf->mpf_physptr) { - /* - * We cannot access to MPC table to compute - * table size yet, as only few megabytes from - * the bottom is mapped now. - * PC-9800's MPC table places on the very last - * of physical memory; so that simply reserving - * PAGE_SIZE from mpg->mpf_physptr yields BUG() - * in reserve_bootmem. - */ - unsigned long size = PAGE_SIZE; - unsigned long end = max_low_pfn * PAGE_SIZE; - if (mpf->mpf_physptr + size > end) - size = end - mpf->mpf_physptr; - reserve_bootmem(mpf->mpf_physptr, size, - BOOTMEM_DEFAULT); - } - -#else - if (!reserve) - return 1; - - reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); - if (mpf->mpf_physptr) - reserve_bootmem_generic(mpf->mpf_physptr, - PAGE_SIZE); -#endif - return 1; - } - bp += 4; - length -= 16; - } - return 0; -} - -static void __init __find_smp_config(unsigned reserve) -{ - unsigned int address; - - /* - * FIXME: Linux assumes you have 640K of base ram.. - * this continues the error... - * - * 1) Scan the bottom 1K for a signature - * 2) Scan the top 1K of base RAM - * 3) Scan the 64K of bios - */ - if (smp_scan_config(0x0, 0x400, reserve) || - smp_scan_config(639 * 0x400, 0x400, reserve) || - smp_scan_config(0xF0000, 0x10000, reserve)) - return; - /* - * If it is an SMP machine we should know now. 
- * - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E, calculate and scan it here. - * - * NOTE! There are Linux loaders that will corrupt the EBDA - * area, and as such this kind of SMP config may be less - * trustworthy, simply because the SMP table may have been - * stomped on during early boot. These loaders are buggy and - * should be fixed. - * - * MP1.4 SPEC states to only scan first 1K of 4K EBDA. - */ - - address = get_bios_ebda(); - if (address) - smp_scan_config(address, 0x400, reserve); -} - -void __init early_find_smp_config(void) -{ - __find_smp_config(0); -} - -void __init find_smp_config(void) -{ - __find_smp_config(1); -} - -/* -------------------------------------------------------------------------- - ACPI-based MP Configuration - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI - -#ifdef CONFIG_X86_IO_APIC - -#define MP_ISA_BUS 0 -#define MP_MAX_IOAPIC_PIN 127 - -extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; - -static int mp_find_ioapic(int gsi) -{ - int i = 0; - - /* Find the IOAPIC that manages this GSI. */ - for (i = 0; i < nr_ioapics; i++) { - if ((gsi >= mp_ioapic_routing[i].gsi_base) - && (gsi <= mp_ioapic_routing[i].gsi_end)) - return i; - } - - printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); - return -1; -} - -static u8 uniq_ioapic_id(u8 id) -{ -#ifdef CONFIG_X86_32 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && - !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) - return io_apic_get_unique_id(nr_ioapics, id); - else - return id; -#else - int i; - DECLARE_BITMAP(used, 256); - bitmap_zero(used, 256); - for (i = 0; i < nr_ioapics; i++) { - struct mpc_config_ioapic *ia = &mp_ioapics[i]; - __set_bit(ia->mpc_apicid, used); - } - if (!test_bit(id, used)) - return id; - return find_first_zero_bit(used, 256); -#endif -} - -void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) -{ - int idx = 0; - - if (bad_ioapic(address)) - return; - - idx = nr_ioapics; - - mp_ioapics[idx].mpc_type = MP_IOAPIC; - mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; - mp_ioapics[idx].mpc_apicaddr = address; - - set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); -#ifdef CONFIG_X86_32 - mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); -#else - mp_ioapics[idx].mpc_apicver = 0; -#endif - /* - * Build basic GSI lookup table to facilitate gsi->io_apic lookups - * and to prevent reprogramming of IOAPIC pins (PCI GSIs). - */ - mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; - mp_ioapic_routing[idx].gsi_base = gsi_base; - mp_ioapic_routing[idx].gsi_end = gsi_base + - io_apic_get_redir_entries(idx); - - printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, " - "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, - mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, - mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); - - nr_ioapics++; -} - -void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) -{ - struct mpc_config_intsrc intsrc; - int ioapic = -1; - int pin = -1; - - /* - * Convert 'gsi' to 'ioapic.pin'. - */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = gsi - mp_ioapic_routing[ioapic].gsi_base; - - /* - * TBD: This check is for faulty timer entries, where the override - * erroneously sets the trigger to level, resulting in a HUGE - * increase of timer interrupts! 
- */ - if ((bus_irq == 0) && (trigger == 3)) - trigger = 1; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_irqflag = (trigger << 2) | polarity; - intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ - intsrc.mpc_dstirq = pin; /* INTIN# */ - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", - intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!\n"); -} - -int es7000_plat; - -void __init mp_config_acpi_legacy_irqs(void) -{ - struct mpc_config_intsrc intsrc; - int i = 0; - int ioapic = -1; - -#if defined (CONFIG_MCA) || defined (CONFIG_EISA) - /* - * Fabricate the legacy ISA bus (bus #31). - */ - mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; -#endif - set_bit(MP_ISA_BUS, mp_bus_not_pci); - Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); - - /* - * Older generations of ES7000 have no legacy identity mappings - */ - if (es7000_plat == 1) - return; - - /* - * Locate the IOAPIC that manages the ISA IRQs (0-15). - */ - ioapic = mp_find_ioapic(0); - if (ioapic < 0) - return; - - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* Conforming */ - intsrc.mpc_srcbus = MP_ISA_BUS; - intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; - - /* - * Use the default configuration for the IRQs 0-15. Unless - * overridden by (MADT) interrupt source override entries. - */ - for (i = 0; i < 16; i++) { - int idx; - - for (idx = 0; idx < mp_irq_entries; idx++) { - struct mpc_config_intsrc *irq = mp_irqs + idx; - - /* Do we already have a mapping for this ISA IRQ? */ - if (irq->mpc_srcbus == MP_ISA_BUS - && irq->mpc_srcbusirq == i) - break; - - /* Do we already have a mapping for this IOAPIC pin */ - if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && - (irq->mpc_dstirq == i)) - break; - } - - if (idx != mp_irq_entries) { - printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); - continue; /* IRQ already used */ - } - - intsrc.mpc_irqtype = mp_INT; - intsrc.mpc_srcbusirq = i; /* Identity mapped */ - intsrc.mpc_dstirq = i; - - Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " - "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, - (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, - intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, - intsrc.mpc_dstirq); - - mp_irqs[mp_irq_entries] = intsrc; - if (++mp_irq_entries == MAX_IRQ_SOURCES) - panic("Max # of irq sources exceeded!\n"); - } -} - -int mp_register_gsi(u32 gsi, int triggering, int polarity) -{ - int ioapic = -1; - int ioapic_pin = 0; - int idx, bit = 0; -#ifdef CONFIG_X86_32 -#define MAX_GSI_NUM 4096 -#define IRQ_COMPRESSION_START 64 - - static int pci_irq = IRQ_COMPRESSION_START; - /* - * Mapping between Global System Interrupts, which - * represent all possible interrupts, and IRQs - * assigned to actual devices. 
-	 */
-	static int gsi_to_irq[MAX_GSI_NUM];
-#else
-
-	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-		return gsi;
-#endif
-
-	/* Don't set up the ACPI SCI because it's already set up */
-	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
-
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0) {
-		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-		return gsi;
-	}
-
-	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-
-#ifdef CONFIG_X86_32
-	if (ioapic_renumber_irq)
-		gsi = ioapic_renumber_irq(ioapic, gsi);
-#endif
-
-	/*
-	 * Avoid pin reprogramming. PRTs typically include entries
-	 * with redundant pin->gsi mappings (but unique PCI devices);
-	 * we only program the IOAPIC on the first.
-	 */
-	bit = ioapic_pin % 32;
-	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-	if (idx > 3) {
-		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-		       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
-		       ioapic_pin);
-		return gsi;
-	}
-	if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-#ifdef CONFIG_X86_32
-		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
-#else
-		return gsi;
-#endif
-	}
-
-	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
-#ifdef CONFIG_X86_32
-	/*
-	 * For GSI >= 64, use IRQ compression
-	 */
-	if ((gsi >= IRQ_COMPRESSION_START)
-	    && (triggering == ACPI_LEVEL_SENSITIVE)) {
-		/*
-		 * For PCI devices assign IRQs in order, avoiding gaps
-		 * due to unused I/O APIC pins.
-		 */
-		int irq = gsi;
-		if (gsi < MAX_GSI_NUM) {
-			/*
-			 * Retain the VIA chipset work-around (gsi > 15), but
-			 * avoid a problem where the 8254 timer (IRQ0) is setup
-			 * via an override (so it's not on pin 0 of the ioapic),
-			 * and at the same time, the pin 0 interrupt is a PCI
-			 * type. The gsi > 15 test could cause these two pins
-			 * to be shared as IRQ0, and they are not shareable.
-			 * So test for this condition, and if necessary, avoid
-			 * the pin collision.
-			 */
-			gsi = pci_irq++;
-			/*
-			 * Don't assign IRQ used by ACPI SCI
-			 */
-			if (gsi == acpi_gbl_FADT.sci_interrupt)
-				gsi = pci_irq++;
-			gsi_to_irq[irq] = gsi;
-		} else {
-			printk(KERN_ERR "GSI %u is too high\n", gsi);
-			return gsi;
-		}
-	}
-#endif
-	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-				triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-				polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-	return gsi;
-}
-
-#endif /* CONFIG_X86_IO_APIC */
-#endif /* CONFIG_ACPI */
--
cgit v1.2.1

From 03056c88cf65ec8375753900246b36ae1c4b8a33 Mon Sep 17 00:00:00 2001
From: Alexander van Heukelum
Date: Sun, 6 Apr 2008 14:47:00 +0200
Subject: x86: remove superfluous initialisation in boot code.

In arch/x86/boot/compressed/misc.c, the variable vidmem is the only
variable that ends up in the data segment. It's also superfluous,
because the first thing the code does is:

	if (RM_SCREEN_INFO.orig_video_mode == 7) {
		vidmem = (char *) 0xb0000;
		vidport = 0x3b4;
	} else {
		vidmem = (char *) 0xb8000;
		vidport = 0x3d4;
	}

This patch removes the initialisation.
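For reference, a minimal sketch of the size effect (illustration only;
the two variable names below are invented for the example, not taken
from misc.c): a static pointer with a non-zero initialiser must be
emitted in the data segment and stored in the image, while an
uninitialised one is merely reserved in .bss and costs nothing.

	static char *with_init = (char *)0xb8000;	/* emitted in .data */
	static char *without_init;			/* reserved in .bss, zero at startup */

With the initialiser gone, vidmem moves to .bss, leaving the data
segment of the compressed boot code empty.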
Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 9470a050f8a4..dad4e699f5a3 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -223,7 +223,7 @@ static memptr free_mem_end_ptr; #define HEAP_SIZE 0x4000 #endif -static char *vidmem = (char *)0xb8000; +static char *vidmem; static int vidport; static int lines, cols; -- cgit v1.2.1 From 9d25cb0811fd0bca2cfd80095ee7663147363f68 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 5 Apr 2008 22:39:04 +0900 Subject: x86: avoid redundant loop in io_apic_level_ack_pending() If one can find an ack pending pin, there is no need to check the rest of them. Signed-off-by: Akinobu Mita Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_64.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 65b6840e1820..cd01f3aa17ba 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -167,11 +167,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int value) writel(value, &io_apic->data); } -static int io_apic_level_ack_pending(unsigned int irq) +static bool io_apic_level_ack_pending(unsigned int irq) { struct irq_pin_list *entry; unsigned long flags; - int pending = 0; spin_lock_irqsave(&ioapic_lock, flags); entry = irq_2_pin + irq; @@ -184,13 +183,17 @@ static int io_apic_level_ack_pending(unsigned int irq) break; reg = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? */ - pending |= (reg >> 14) & 1; + if ((reg >> 14) & 1) { + spin_unlock_irqrestore(&ioapic_lock, flags); + return true; + } if (!entry->next) break; entry = irq_2_pin + entry->next; } spin_unlock_irqrestore(&ioapic_lock, flags); - return pending; + + return false; } /* -- cgit v1.2.1 From a2249cba2f1d7d06633de09c71353ae6b1567206 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 5 Apr 2008 22:39:05 +0900 Subject: x86: use ioapic_read_entry() and ioapic_write_entry() Remove duplicate code by using ioapic_read_entry() and ioapic_write_entry() in io_apic_{32,64}.c Signed-off-by: Akinobu Mita Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic_32.c | 10 ++-------- arch/x86/kernel/io_apic_64.c | 25 ++++++------------------- 2 files changed, 8 insertions(+), 27 deletions(-) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index db1b1f30b650..6d69de716b5e 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1228,7 +1228,6 @@ static void __init setup_IO_APIC_irqs(void) { struct IO_APIC_route_entry entry; int apic, pin, idx, irq, first_notcon = 1, vector; - unsigned long flags; apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -1294,9 +1293,7 @@ static void __init setup_IO_APIC_irqs(void) if (!apic && (irq < 16)) disable_8259A_irq(irq); } - spin_lock_irqsave(&ioapic_lock, flags); - __ioapic_write_entry(apic, pin, entry); - spin_unlock_irqrestore(&ioapic_lock, flags); + ioapic_write_entry(apic, pin, entry); } } @@ -2760,7 +2757,6 @@ int __init io_apic_get_redir_entries (int ioapic) int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) { struct IO_APIC_route_entry entry; - unsigned long flags; if (!IO_APIC_IRQ(irq)) { printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", @@ -2801,9 +2797,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, 
int edge_level, int a if (!ioapic && (irq < 16)) disable_8259A_irq(irq); - spin_lock_irqsave(&ioapic_lock, flags); - __ioapic_write_entry(ioapic, pin, entry); - spin_unlock_irqrestore(&ioapic_lock, flags); + ioapic_write_entry(ioapic, pin, entry); return 0; } diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index cd01f3aa17ba..6e383e126db2 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -917,9 +917,8 @@ static void __init setup_IO_APIC_irqs(void) static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) { struct IO_APIC_route_entry entry; - unsigned long flags; - memset(&entry,0,sizeof(entry)); + memset(&entry, 0, sizeof(entry)); disable_8259A_irq(0); @@ -947,10 +946,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in /* * Add it to the IO-APIC irq-routing table: */ - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); - io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); - spin_unlock_irqrestore(&ioapic_lock, flags); + ioapic_write_entry(apic, pin, entry); enable_8259A_irq(0); } @@ -1611,17 +1607,14 @@ static inline void unlock_ExtINT_logic(void) int apic, pin, i; struct IO_APIC_route_entry entry0, entry1; unsigned char save_control, save_freq_select; - unsigned long flags; pin = find_isa_irq_pin(8, mp_INT); apic = find_isa_irq_apic(8, mp_INT); if (pin == -1) return; - spin_lock_irqsave(&ioapic_lock, flags); - *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); - *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); - spin_unlock_irqrestore(&ioapic_lock, flags); + entry0 = ioapic_read_entry(apic, pin); + clear_IO_APIC_pin(apic, pin); memset(&entry1, 0, sizeof(entry1)); @@ -1634,10 +1627,7 @@ static inline void unlock_ExtINT_logic(void) entry1.trigger = 0; entry1.vector = 0; - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); - io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); - spin_unlock_irqrestore(&ioapic_lock, flags); + ioapic_write_entry(apic, pin, entry1); save_control = CMOS_READ(RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); @@ -1656,10 +1646,7 @@ static inline void unlock_ExtINT_logic(void) CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); clear_IO_APIC_pin(apic, pin); - spin_lock_irqsave(&ioapic_lock, flags); - io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); - io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); - spin_unlock_irqrestore(&ioapic_lock, flags); + ioapic_write_entry(apic, pin, entry0); } /* -- cgit v1.2.1 From 07004b12a1199f82c016eb976f493e5b70820a1d Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sat, 5 Apr 2008 22:39:06 +0900 Subject: x86: remove unnecessary memset() No need to clear the memory allocated by alloc_bootmem(). It is already filled with zero. 
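A short sketch of the redundancy being removed (illustration only, not
the exact code from io_apic_64.c): memory handed out by the boot-time
allocator is already zero-filled, so clearing it again is dead work.

	void *mem = alloc_bootmem(n);	/* returned memory is already zeroed */
	/* memset(mem, 0, n);		   redundant -- dropped by this patch */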
Signed-off-by: Akinobu Mita
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/io_apic_64.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 6e383e126db2..616c53afb711 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -2315,7 +2315,6 @@ static struct resource * __init ioapic_setup_resources(void)
 	res = (void *)mem;

 	if (mem != NULL) {
-		memset(mem, 0, n);
 		mem += sizeof(struct resource) * nr_ioapics;

 		for (i = 0; i < nr_ioapics; i++) {
--
cgit v1.2.1


From addfc66bb55234c154bb43f0f7606bc5c9fc511d Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Sat, 5 Apr 2008 22:39:07 +0900
Subject: x86: remove unnecessary tmp local variable

There is no reason to use a more obscure name than the loop
variable itself.

Signed-off-by: Akinobu Mita
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/io_apic_32.c | 3 +--
 arch/x86/kernel/io_apic_64.c | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 6d69de716b5e..2e2f42074e18 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1996,8 +1996,7 @@ static inline void init_IO_APIC_traps(void)
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
-		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+		if (IO_APIC_IRQ(irq) && !irq_vector[irq]) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 616c53afb711..09ea0ec39de6 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1527,8 +1527,7 @@ static inline void init_IO_APIC_traps(void)
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
-		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
+		if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
--
cgit v1.2.1


From 7281c96f37f784d64b0241a7b082bb13bab9580b Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Sat, 5 Apr 2008 22:39:08 +0900
Subject: x86: use cpumask_of_cpu()

Use cpumask_of_cpu() rather than the pair of cpus_clear() and cpu_set().
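(For illustration, a minimal sketch of the idiom being replaced; the
actual call sites are in the diff below:)

	cpumask_t mask;

	/* before: build a single-CPU mask in two steps */
	cpus_clear(mask);
	cpu_set(cpu, mask);

	/* after: one expression */
	mask = cpumask_of_cpu(cpu);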
Signed-off-by: Akinobu Mita
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/genapic_flat_64.c | 5 +----
 arch/x86/kernel/io_apic_64.c | 4 +---
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 6a44e8dace37..1a9c68845ee8 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -138,12 +138,9 @@ static cpumask_t physflat_target_cpus(void)

 static cpumask_t physflat_vector_allocation_domain(int cpu)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	return cpumask_of_cpu(cpu);
 }
-
 static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 09ea0ec39de6..b54464b26658 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1362,9 +1362,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	unsigned long flags;

 	spin_lock_irqsave(&vector_lock, flags);
-	cpus_clear(mask);
-	cpu_set(first_cpu(cfg->domain), mask);
-
+	mask = cpumask_of_cpu(first_cpu(cfg->domain));
 	send_IPI_mask(mask, cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
--
cgit v1.2.1


From 6107a7c4e2a871c37bb6c49e5e8286079f0968f9 Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Sat, 5 Apr 2008 22:39:09 +0900
Subject: x86: use cpu_online()

Signed-off-by: Akinobu Mita
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/reboot.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 66cd4afc1e57..9692202d3bfb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -411,12 +411,12 @@ static void native_machine_shutdown(void)
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-		cpu_isset(reboot_cpu, cpu_online_map))
+		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif

 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_isset(reboot_cpu_id, cpu_online_map))
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();

 	/* Make certain I only run on the appropriate processor */
--
cgit v1.2.1


From 711554dbc4d5402338ce115dca0df38e9f633330 Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Mon, 7 Apr 2008 11:36:39 -0700
Subject: x86: print out buggy mptable

print out buggy mptable, instead of skipping it quietly

Signed-off-by: Yinghai Lu
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/mpparse.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6e5e4547981c..70744e344fa1 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -390,10 +390,13 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
 			break;
 		}
 		default:
-		{
-			count = mpc->mpc_length;
-			break;
-		}
+			/* wrong mptable */
+			printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
+			printk(KERN_ERR "type %x\n", *mpt);
+			print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
+					1, mpc, mpc->mpc_length, 1);
+			count = mpc->mpc_length;
+			break;
 		}
 #ifdef CONFIG_X86_NUMAQ
 		++mpc_record;
--
cgit v1.2.1


From 63d38198a0f57dca87e6cb79931c7bedbb7ab069 Mon Sep 17 00:00:00 2001
From: Alok Kataria
Date: Mon, 7 Apr 2008 11:38:33 -0700
Subject: x86: fix paranoia about using BIOS quickboot mechanism.

> > Make sure that we clear the "shutdown status flag" in the CMOS
> > register after each CPU is brought up. This fixes a problem where the
> > "shutdown status flag" may remain set when a CPU is brought up after
> > booting.
>
> btw., what problem does this result in, exactly?

The shutdown status flag set to "0xA" corresponds to "JMP double word
request without INT init". This JMP at reboot time is at an unintended
location, and results in triple faults in our case.

Though this error at reboot can be safely ignored in a VM environment,
I am not sure what the effect would be on a physical system. Maybe it
will result in a triple fault and an eventual hardware reset, thus
masking this BUG in the kernel.

This fix just makes sure that we reset that status flag after
initialization is done.

Fix paranoia about using BIOS quickboot mechanism.

Make sure that we clear the "shutdown status flag" in the CMOS
register after each CPU is brought up. This fixes a problem where the
"shutdown status flag" may remain set when a CPU is brought up after
booting.

Signed-off-by: Alok N Kataria
Signed-off-by: Dan Arai
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 21ad3f396a05..4517d1c01eb5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1001,6 +1001,11 @@ do_rest:
 	/* mark "stuck" area as not stuck */
 	*((volatile unsigned long *)trampoline_base) = 0;

+	/*
+	 * Cleanup possible dangling ends...
+	 */
+	smpboot_restore_warm_reset_vector();
+
 	return boot_error;
 }

@@ -1254,11 +1259,6 @@ void __init native_smp_prepare_boot_cpu(void)

 void __init native_smp_cpus_done(unsigned int max_cpus)
 {
-	/*
-	 * Cleanup possible dangling ends...
-	 */
-	smpboot_restore_warm_reset_vector();
-
 	Dprintk("Boot done.\n");

 	impress_friends();
--
cgit v1.2.1


From df96323dfaebdf7e17cdf0656096e6ab2158ec76 Mon Sep 17 00:00:00 2001
From: Jacek Luczak
Date: Fri, 11 Apr 2008 13:28:37 +0200
Subject: x86: section mismatch fixes, #1

This patch fixes mismatch warnings in smp_checks()
(in arch/x86/kernel/smpboot.c):

WARNING: arch/x86/kernel/built-in.o(.text+0x11922): Section mismatch
in reference from the function smp_checks() to the variable
.cpuinit.data:smp_b_stepping
The function smp_checks() references
the variable __cpuinitdata smp_b_stepping.
This is often because smp_checks lacks a __cpuinitdata
annotation or the annotation of smp_b_stepping is wrong.

Signed-off-by: Jacek Luczak
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 4517d1c01eb5..ca3929b16049 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -437,7 +437,7 @@ valid_k7:
 #endif
 }

-void smp_checks(void)
+void __cpuinit smp_checks(void)
 {
 	if (smp_b_stepping)
 		printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
--
cgit v1.2.1


From e223f162a1d37aeeaa6c1ee37d81cc084aa2b004 Mon Sep 17 00:00:00 2001
From: Jacek Luczak
Date: Thu, 10 Apr 2008 21:16:41 +0200
Subject: x86: setup_trampoline() - fix section mismatch warning

this patch fixes section mismatch warnings (on an x86_64 host) in
setup_trampoline(), which was referencing __initdata variables
trampoline_data and trampoline_end.
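(As a reduced illustration of this class of warning, a hypothetical
example and not the kernel code: a __cpuinit function can still run at
CPU hotplug time after boot, so it must not reference __initdata, which
is discarded once init finishes:)

	static unsigned char probe_buf[16] __initdata;	/* discarded after init */

	unsigned long __cpuinit cpu_setup(void)		/* may run after init */
	{
		return (unsigned long)probe_buf;	/* section mismatch */
	}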
Warning messages:

WARNING: arch/x86/kernel/built-in.o(.cpuinit.text+0x2b6a): Section mismatch
in reference from the function setup_trampoline() to the variable
.init.data:trampoline_data
The function __cpuinit setup_trampoline() references
a variable __initdata trampoline_data.
If trampoline_data is only used by setup_trampoline then
annotate trampoline_data with a matching annotation.

WARNING: arch/x86/kernel/built-in.o(.cpuinit.text+0x2b71): Section mismatch
in reference from the function setup_trampoline() to the variable
.init.data:trampoline_end
The function __cpuinit setup_trampoline() references
a variable __initdata trampoline_end.
If trampoline_end is only used by setup_trampoline then
annotate trampoline_end with a matching annotation.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/trampoline_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 4aedd0bcee4c..2a07e67d6697 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -32,7 +32,7 @@

 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
 #ifndef CONFIG_HOTPLUG_CPU
-.section .init.data, "aw", @progbits
+.section .cpuinit.data, "aw", @progbits
 #else
 .section .rodata, "a", @progbits
 #endif
--
cgit v1.2.1


From f49688d459c5eaa62db3597cbfd3cb13e361d415 Mon Sep 17 00:00:00 2001
From: Paolo Ciarrocchi
Date: Fri, 22 Feb 2008 23:11:39 +0100
Subject: x86: coding style fixes to arch/x86/kernel/acpi/sleep.c

Signed-off-by: Paolo Ciarrocchi
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/acpi/sleep.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 6bc815cd8cb3..dd78326ae47c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -13,7 +13,7 @@
 #include

 /* address in low memory of the wakeup routine. */
-unsigned long acpi_wakeup_address = 0;
+unsigned long acpi_wakeup_address;
 unsigned long acpi_realmode_flags;

 extern char wakeup_start, wakeup_end;
--
cgit v1.2.1


From e44b7b7525ad9d43163ab5e60c784325419e0ea6 Mon Sep 17 00:00:00 2001
From: Pavel Machek
Date: Thu, 10 Apr 2008 23:28:10 +0200
Subject: x86: move suspend wakeup code to C

Move wakeup code to .c, so that video mode setting code can be shared
between boot and wakeup. Remove nasty assembly code in 64-bit case by
re-using trampoline code. Stack setup was fixed to clear the high
16 bits of %esp, which may fix some machines.

.c code sharing and morse code were done by H. Peter Anvin, Sam Ravnborg
reviewed kbuild-related stuff, and it seems okay to him. Rafael did
some cleanups.

[rjw:
* Made the patch stop breaking compilation on x86-32
* Added arch/x86/kernel/acpi/sleep.h
* Got rid of compiler warnings in arch/x86/kernel/acpi/sleep.c
* Fixed 32-bit compilation on x86-64 systems
* Added include/asm-x86/trampoline.h and fixed the non-SMP
  compilation on 64-bit x86
* Removed arch/x86/kernel/acpi/sleep_32.c which was not used
* Fixed some breakage caused by the integration of smpboot.c done
  under us in the meantime]

Signed-off-by: Pavel Machek
Signed-off-by: H. Peter Anvin
Reviewed-by: Sam Ravnborg
Signed-off-by: Rafael J.
Wysocki Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/boot/Makefile | 2 +- arch/x86/boot/boot.h | 5 + arch/x86/boot/video-bios.c | 6 + arch/x86/boot/video-mode.c | 173 ++++++++++++++++ arch/x86/boot/video-vesa.c | 8 + arch/x86/boot/video-vga.c | 12 +- arch/x86/boot/video.c | 157 +-------------- arch/x86/kernel/acpi/Makefile | 9 +- arch/x86/kernel/acpi/realmode/Makefile | 57 ++++++ arch/x86/kernel/acpi/realmode/copy.S | 1 + arch/x86/kernel/acpi/realmode/video-bios.c | 1 + arch/x86/kernel/acpi/realmode/video-mode.c | 1 + arch/x86/kernel/acpi/realmode/video-vesa.c | 1 + arch/x86/kernel/acpi/realmode/video-vga.c | 1 + arch/x86/kernel/acpi/realmode/wakemain.c | 81 ++++++++ arch/x86/kernel/acpi/realmode/wakeup.S | 113 +++++++++++ arch/x86/kernel/acpi/realmode/wakeup.h | 36 ++++ arch/x86/kernel/acpi/realmode/wakeup.lds.S | 61 ++++++ arch/x86/kernel/acpi/sleep.c | 71 +++++-- arch/x86/kernel/acpi/sleep.h | 16 ++ arch/x86/kernel/acpi/sleep_32.c | 40 ---- arch/x86/kernel/acpi/wakeup_32.S | 247 ++--------------------- arch/x86/kernel/acpi/wakeup_64.S | 313 +---------------------------- arch/x86/kernel/acpi/wakeup_rm.S | 10 + arch/x86/kernel/e820_64.c | 5 +- arch/x86/kernel/head_64.S | 4 - arch/x86/kernel/setup_32.c | 4 +- arch/x86/kernel/setup_64.c | 1 + arch/x86/kernel/smpboot.c | 6 +- arch/x86/kernel/trampoline_64.S | 5 - arch/x86/mach-voyager/voyager_smp.c | 1 + include/asm-x86/smp.h | 13 -- include/asm-x86/trampoline.h | 21 ++ 34 files changed, 711 insertions(+), 773 deletions(-) create mode 100644 arch/x86/boot/video-mode.c create mode 100644 arch/x86/kernel/acpi/realmode/Makefile create mode 100644 arch/x86/kernel/acpi/realmode/copy.S create mode 100644 arch/x86/kernel/acpi/realmode/video-bios.c create mode 100644 arch/x86/kernel/acpi/realmode/video-mode.c create mode 100644 arch/x86/kernel/acpi/realmode/video-vesa.c create mode 100644 arch/x86/kernel/acpi/realmode/video-vga.c create mode 100644 arch/x86/kernel/acpi/realmode/wakemain.c create mode 100644 arch/x86/kernel/acpi/realmode/wakeup.S create mode 100644 arch/x86/kernel/acpi/realmode/wakeup.h create mode 100644 arch/x86/kernel/acpi/realmode/wakeup.lds.S create mode 100644 arch/x86/kernel/acpi/sleep.h delete mode 100644 arch/x86/kernel/acpi/sleep_32.c create mode 100644 arch/x86/kernel/acpi/wakeup_rm.S create mode 100644 include/asm-x86/trampoline.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0a7193ae45ed..4e32b6f7d31a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -181,7 +181,7 @@ config X86_BIOS_REBOOT config X86_TRAMPOLINE bool - depends on X86_SMP || (X86_VOYAGER && SMP) + depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) default y config KTIME_SCALAR diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 9695affeb584..7ee102f9c4f8 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -30,7 +30,7 @@ subdir- := compressed setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o setup-y += header.o main.o mca.o memory.o pm.o pmjump.o -setup-y += printf.o string.o tty.o video.o version.o +setup-y += printf.o string.o tty.o video.o video-mode.o version.o setup-$(CONFIG_X86_APM_BOOT) += apm.o setup-$(CONFIG_X86_VOYAGER) += voyager.o diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 7822a4983da2..09578070bfba 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -286,6 +286,11 @@ int getchar_timeout(void); /* video.c */ void set_video(void); +/* video-mode.c */ +int set_mode(u16 mode); +int mode_defined(u16 mode); +void probe_cards(int 
unsafe); + /* video-vesa.c */ void vesa_store_edid(void); diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c index ff664a117096..39e247e96172 100644 --- a/arch/x86/boot/video-bios.c +++ b/arch/x86/boot/video-bios.c @@ -50,6 +50,7 @@ static int set_bios_mode(u8 mode) if (new_mode == mode) return 0; /* Mode change OK */ +#ifndef _WAKEUP if (new_mode != boot_params.screen_info.orig_video_mode) { /* Mode setting failed, but we didn't end up where we started. That's bad. Try to revert to the original @@ -59,13 +60,18 @@ static int set_bios_mode(u8 mode) : "+a" (ax) : : "ebx", "ecx", "edx", "esi", "edi"); } +#endif return -1; } static int bios_probe(void) { u8 mode; +#ifdef _WAKEUP + u8 saved_mode = 0x03; +#else u8 saved_mode = boot_params.screen_info.orig_video_mode; +#endif u16 crtc; struct mode_info *mi; int nmodes = 0; diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c new file mode 100644 index 000000000000..748e8d06290a --- /dev/null +++ b/arch/x86/boot/video-mode.c @@ -0,0 +1,173 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright 2007-2008 rPath, Inc. - All Rights Reserved + * + * This file is part of the Linux kernel, and is made available under + * the terms of the GNU General Public License version 2. + * + * ----------------------------------------------------------------------- */ + +/* + * arch/i386/boot/video-mode.c + * + * Set the video mode. This is separated out into a different + * file in order to be shared with the ACPI wakeup code. + */ + +#include "boot.h" +#include "video.h" +#include "vesa.h" + +/* + * Common variables + */ +int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */ +u16 video_segment; +int force_x, force_y; /* Don't query the BIOS for cols/rows */ + +int do_restore; /* Screen contents changed during mode flip */ +int graphic_mode; /* Graphic mode with linear frame buffer */ + +/* Probe the video drivers and have them generate their mode lists. */ +void probe_cards(int unsafe) +{ + struct card_info *card; + static u8 probed[2]; + + if (probed[unsafe]) + return; + + probed[unsafe] = 1; + + for (card = video_cards; card < video_cards_end; card++) { + if (card->unsafe == unsafe) { + if (card->probe) + card->nmodes = card->probe(); + else + card->nmodes = 0; + } + } +} + +/* Test if a mode is defined */ +int mode_defined(u16 mode) +{ + struct card_info *card; + struct mode_info *mi; + int i; + + for (card = video_cards; card < video_cards_end; card++) { + mi = card->modes; + for (i = 0; i < card->nmodes; i++, mi++) { + if (mi->mode == mode) + return 1; + } + } + + return 0; +} + +/* Set mode (without recalc) */ +static int raw_set_mode(u16 mode, u16 *real_mode) +{ + int nmode, i; + struct card_info *card; + struct mode_info *mi; + + /* Drop the recalc bit if set */ + mode &= ~VIDEO_RECALC; + + /* Scan for mode based on fixed ID, position, or resolution */ + nmode = 0; + for (card = video_cards; card < video_cards_end; card++) { + mi = card->modes; + for (i = 0; i < card->nmodes; i++, mi++) { + int visible = mi->x || mi->y; + + if ((mode == nmode && visible) || + mode == mi->mode || + mode == (mi->y << 8)+mi->x) { + *real_mode = mi->mode; + return card->set_mode(mi); + } + + if (visible) + nmode++; + } + } + + /* Nothing found? Is it an "exceptional" (unprobed) mode? 
*/ + for (card = video_cards; card < video_cards_end; card++) { + if (mode >= card->xmode_first && + mode < card->xmode_first+card->xmode_n) { + struct mode_info mix; + *real_mode = mix.mode = mode; + mix.x = mix.y = 0; + return card->set_mode(&mix); + } + } + + /* Otherwise, failure... */ + return -1; +} + +/* + * Recalculate the vertical video cutoff (hack!) + */ +static void vga_recalc_vertical(void) +{ + unsigned int font_size, rows; + u16 crtc; + u8 pt, ov; + + set_fs(0); + font_size = rdfs8(0x485); /* BIOS: font size (pixels) */ + rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */ + + rows *= font_size; /* Visible scan lines */ + rows--; /* ... minus one */ + + crtc = vga_crtc(); + + pt = in_idx(crtc, 0x11); + pt &= ~0x80; /* Unlock CR0-7 */ + out_idx(pt, crtc, 0x11); + + out_idx((u8)rows, crtc, 0x12); /* Lower height register */ + + ov = in_idx(crtc, 0x07); /* Overflow register */ + ov &= 0xbd; + ov |= (rows >> (8-1)) & 0x02; + ov |= (rows >> (9-6)) & 0x40; + out_idx(ov, crtc, 0x07); +} + +/* Set mode (with recalc if specified) */ +int set_mode(u16 mode) +{ + int rv; + u16 real_mode; + + /* Very special mode numbers... */ + if (mode == VIDEO_CURRENT_MODE) + return 0; /* Nothing to do... */ + else if (mode == NORMAL_VGA) + mode = VIDEO_80x25; + else if (mode == EXTENDED_VGA) + mode = VIDEO_8POINT; + + rv = raw_set_mode(mode, &real_mode); + if (rv) + return rv; + + if (mode & VIDEO_RECALC) + vga_recalc_vertical(); + + /* Save the canonical mode number for the kernel, not + an alias, size specification or menu position */ +#ifndef _WAKEUP + boot_params.hdr.vid_mode = real_mode; +#endif + return 0; +} diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 419b5c273374..5d5a3f6e8b5c 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -24,7 +24,11 @@ static struct vesa_mode_info vminfo; __videocard video_vesa; +#ifndef _WAKEUP static void vesa_store_mode_params_graphics(void); +#else /* _WAKEUP */ +static inline void vesa_store_mode_params_graphics(void) {} +#endif /* _WAKEUP */ static int vesa_probe(void) { @@ -165,6 +169,8 @@ static int vesa_set_mode(struct mode_info *mode) } +#ifndef _WAKEUP + /* Switch DAC to 8-bit mode */ static void vesa_dac_set_8bits(void) { @@ -288,6 +294,8 @@ void vesa_store_edid(void) #endif /* CONFIG_FIRMWARE_EDID */ } +#endif /* not _WAKEUP */ + __videocard video_vesa = { .card_name = "VESA", diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c index 7259387b7d19..330d6589a2ad 100644 --- a/arch/x86/boot/video-vga.c +++ b/arch/x86/boot/video-vga.c @@ -210,6 +210,8 @@ static int vga_set_mode(struct mode_info *mode) */ static int vga_probe(void) { + u16 ega_bx; + static const char *card_name[] = { "CGA/MDA/HGC", "EGA", "VGA" }; @@ -226,12 +228,16 @@ static int vga_probe(void) u8 vga_flag; asm(INT10 - : "=b" (boot_params.screen_info.orig_video_ega_bx) + : "=b" (ega_bx) : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */ : "ecx", "edx", "esi", "edi"); +#ifndef _WAKEUP + boot_params.screen_info.orig_video_ega_bx = ega_bx; +#endif + /* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */ - if ((u8)boot_params.screen_info.orig_video_ega_bx != 0x10) { + if ((u8)ega_bx != 0x10) { /* EGA/VGA */ asm(INT10 : "=a" (vga_flag) @@ -240,7 +246,9 @@ static int vga_probe(void) if (vga_flag == 0x1a) { adapter = ADAPTER_VGA; +#ifndef _WAKEUP boot_params.screen_info.orig_video_isVGA = 1; +#endif } else { adapter = ADAPTER_EGA; } diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 
696d08f3843c..c1c47ba069ef 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -18,21 +18,6 @@ #include "video.h" #include "vesa.h" -/* - * Mode list variables - */ -static struct card_info cards[]; /* List of cards to probe for */ - -/* - * Common variables - */ -int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */ -u16 video_segment; -int force_x, force_y; /* Don't query the BIOS for cols/rows */ - -int do_restore = 0; /* Screen contents changed during mode flip */ -int graphic_mode; /* Graphic mode with linear frame buffer */ - static void store_cursor_position(void) { u16 curpos; @@ -107,147 +92,6 @@ static void store_mode_params(void) boot_params.screen_info.orig_video_lines = y; } -/* Probe the video drivers and have them generate their mode lists. */ -static void probe_cards(int unsafe) -{ - struct card_info *card; - static u8 probed[2]; - - if (probed[unsafe]) - return; - - probed[unsafe] = 1; - - for (card = video_cards; card < video_cards_end; card++) { - if (card->unsafe == unsafe) { - if (card->probe) - card->nmodes = card->probe(); - else - card->nmodes = 0; - } - } -} - -/* Test if a mode is defined */ -int mode_defined(u16 mode) -{ - struct card_info *card; - struct mode_info *mi; - int i; - - for (card = video_cards; card < video_cards_end; card++) { - mi = card->modes; - for (i = 0; i < card->nmodes; i++, mi++) { - if (mi->mode == mode) - return 1; - } - } - - return 0; -} - -/* Set mode (without recalc) */ -static int raw_set_mode(u16 mode, u16 *real_mode) -{ - int nmode, i; - struct card_info *card; - struct mode_info *mi; - - /* Drop the recalc bit if set */ - mode &= ~VIDEO_RECALC; - - /* Scan for mode based on fixed ID, position, or resolution */ - nmode = 0; - for (card = video_cards; card < video_cards_end; card++) { - mi = card->modes; - for (i = 0; i < card->nmodes; i++, mi++) { - int visible = mi->x || mi->y; - - if ((mode == nmode && visible) || - mode == mi->mode || - mode == (mi->y << 8)+mi->x) { - *real_mode = mi->mode; - return card->set_mode(mi); - } - - if (visible) - nmode++; - } - } - - /* Nothing found? Is it an "exceptional" (unprobed) mode? */ - for (card = video_cards; card < video_cards_end; card++) { - if (mode >= card->xmode_first && - mode < card->xmode_first+card->xmode_n) { - struct mode_info mix; - *real_mode = mix.mode = mode; - mix.x = mix.y = 0; - return card->set_mode(&mix); - } - } - - /* Otherwise, failure... */ - return -1; -} - -/* - * Recalculate the vertical video cutoff (hack!) - */ -static void vga_recalc_vertical(void) -{ - unsigned int font_size, rows; - u16 crtc; - u8 pt, ov; - - set_fs(0); - font_size = rdfs8(0x485); /* BIOS: font size (pixels) */ - rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */ - - rows *= font_size; /* Visible scan lines */ - rows--; /* ... minus one */ - - crtc = vga_crtc(); - - pt = in_idx(crtc, 0x11); - pt &= ~0x80; /* Unlock CR0-7 */ - out_idx(pt, crtc, 0x11); - - out_idx((u8)rows, crtc, 0x12); /* Lower height register */ - - ov = in_idx(crtc, 0x07); /* Overflow register */ - ov &= 0xbd; - ov |= (rows >> (8-1)) & 0x02; - ov |= (rows >> (9-6)) & 0x40; - out_idx(ov, crtc, 0x07); -} - -/* Set mode (with recalc if specified) */ -static int set_mode(u16 mode) -{ - int rv; - u16 real_mode; - - /* Very special mode numbers... */ - if (mode == VIDEO_CURRENT_MODE) - return 0; /* Nothing to do... 
*/ - else if (mode == NORMAL_VGA) - mode = VIDEO_80x25; - else if (mode == EXTENDED_VGA) - mode = VIDEO_8POINT; - - rv = raw_set_mode(mode, &real_mode); - if (rv) - return rv; - - if (mode & VIDEO_RECALC) - vga_recalc_vertical(); - - /* Save the canonical mode number for the kernel, not - an alias, size specification or menu position */ - boot_params.hdr.vid_mode = real_mode; - return 0; -} - static unsigned int get_entry(void) { char entry_buf[4]; @@ -486,6 +330,7 @@ void set_video(void) printf("Undefined video mode number: %x\n", mode); mode = ASK_VGA; } + boot_params.hdr.vid_mode = mode; vesa_store_edid(); store_mode_params(); diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 19d3d6e9d09b..7335959b6aff 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -1,7 +1,14 @@ +subdir- := realmode + obj-$(CONFIG_ACPI) += boot.o -obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o +obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o ifneq ($(CONFIG_ACPI_PROCESSOR),) obj-y += cstate.o processor.o endif +$(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin + +$(obj)/realmode/wakeup.bin: FORCE + $(Q)$(MAKE) $(build)=$(obj)/realmode $@ + diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile new file mode 100644 index 000000000000..092900854acc --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/Makefile @@ -0,0 +1,57 @@ +# +# arch/x86/kernel/acpi/realmode/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# + +targets := wakeup.bin wakeup.elf + +wakeup-y += wakeup.o wakemain.o video-mode.o copy.o + +# The link order of the video-*.o modules can matter. In particular, +# video-vga.o *must* be listed first, followed by video-vesa.o. +# Hardware-specific drivers should follow in the order they should be +# probed, and video-bios.o should typically be last. +wakeup-y += video-vga.o +wakeup-y += video-vesa.o +wakeup-y += video-bios.o + +targets += $(wakeup-y) + +bootsrc := $(src)/../../../boot + +# --------------------------------------------------------------------------- + +# How to compile the 16-bit code. Note we always compile for -march=i386, +# that way we can complain to the user if the CPU is insufficient. +# Compile with _SETUP since this is similar to the boot-time setup code. 
+KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \ + -I$(srctree)/$(bootsrc) \ + $(cflags-y) \ + -Wall -Wstrict-prototypes \ + -march=i386 -mregparm=3 \ + -include $(srctree)/$(bootsrc)/code16gcc.h \ + -fno-strict-aliasing -fomit-frame-pointer \ + $(call cc-option, -ffreestanding) \ + $(call cc-option, -fno-toplevel-reorder,\ + $(call cc-option, -fno-unit-at-a-time)) \ + $(call cc-option, -fno-stack-protector) \ + $(call cc-option, -mpreferred-stack-boundary=2) +KBUILD_CFLAGS += $(call cc-option, -m32) +KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + +WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y)) + +LDFLAGS_wakeup.elf := -T + +CPPFLAGS_wakeup.lds += -P -C + +$(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE + $(call if_changed,ld) + +OBJCOPYFLAGS_wakeup.bin := -O binary + +$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE + $(call if_changed,objcopy) diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S new file mode 100644 index 000000000000..dc59ebee69d8 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/copy.S @@ -0,0 +1 @@ +#include "../../../boot/copy.S" diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c new file mode 100644 index 000000000000..7deabc144a27 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-bios.c @@ -0,0 +1 @@ +#include "../../../boot/video-bios.c" diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c new file mode 100644 index 000000000000..328ad209f113 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-mode.c @@ -0,0 +1 @@ +#include "../../../boot/video-mode.c" diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c new file mode 100644 index 000000000000..9dbb9672226a --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-vesa.c @@ -0,0 +1 @@ +#include "../../../boot/video-vesa.c" diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c new file mode 100644 index 000000000000..bcc81255f374 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-vga.c @@ -0,0 +1 @@ +#include "../../../boot/video-vga.c" diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/kernel/acpi/realmode/wakemain.c new file mode 100644 index 000000000000..883962d9eef2 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakemain.c @@ -0,0 +1,81 @@ +#include "wakeup.h" +#include "boot.h" + +static void udelay(int loops) +{ + while (loops--) + io_delay(); /* Approximately 1 us */ +} + +static void beep(unsigned int hz) +{ + u8 enable; + + if (!hz) { + enable = 0x00; /* Turn off speaker */ + } else { + u16 div = 1193181/hz; + + outb(0xb6, 0x43); /* Ctr 2, squarewave, load, binary */ + io_delay(); + outb(div, 0x42); /* LSB of counter */ + io_delay(); + outb(div >> 8, 0x42); /* MSB of counter */ + io_delay(); + + enable = 0x03; /* Turn on speaker */ + } + inb(0x61); /* Dummy read of System Control Port B */ + io_delay(); + outb(enable, 0x61); /* Enable timer 2 output to speaker */ + io_delay(); +} + +#define DOT_HZ 880 +#define DASH_HZ 587 +#define US_PER_DOT 125000 + +/* Okay, this is totally silly, but it's kind of fun. 
*/ +static void send_morse(const char *pattern) +{ + char s; + + while ((s = *pattern++)) { + switch (s) { + case '.': + beep(DOT_HZ); + udelay(US_PER_DOT); + beep(0); + udelay(US_PER_DOT); + break; + case '-': + beep(DASH_HZ); + udelay(US_PER_DOT * 3); + beep(0); + udelay(US_PER_DOT); + break; + default: /* Assume it's a space */ + udelay(US_PER_DOT * 3); + break; + } + } +} + +void main(void) +{ + /* Kill machine if structures are wrong */ + if (wakeup_header.real_magic != 0x12345678) + while (1); + + if (wakeup_header.realmode_flags & 4) + send_morse("...-"); + + if (wakeup_header.realmode_flags & 1) + asm volatile("lcallw $0xc000,$3"); + + if (wakeup_header.realmode_flags & 2) { + /* Need to call BIOS */ + probe_cards(0); + set_mode(wakeup_header.video_mode); + } +} diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S new file mode 100644 index 000000000000..f9b77fb37e5b --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.S @@ -0,0 +1,113 @@ +/* + * ACPI wakeup real mode startup stub + */ +#include +#include +#include +#include + + .code16 + .section ".header", "a" + +/* This should match the structure in wakeup.h */ + .globl wakeup_header +wakeup_header: +video_mode: .short 0 /* Video mode number */ +pmode_return: .byte 0x66, 0xea /* ljmpl */ + .long 0 /* offset goes here */ + .short __KERNEL_CS +pmode_cr0: .long 0 /* Saved %cr0 */ +pmode_cr3: .long 0 /* Saved %cr3 */ +pmode_cr4: .long 0 /* Saved %cr4 */ +pmode_efer: .quad 0 /* Saved EFER */ +pmode_gdt: .quad 0 +realmode_flags: .long 0 +real_magic: .long 0 +trampoline_segment: .word 0 +signature: .long 0x51ee1111 + + .text + .globl _start + .code16 +wakeup_code: +_start: + cli + cld + + /* Set up segments */ + movw %cs, %ax + movw %ax, %ds + movw %ax, %es + movw %ax, %ss + + movl $wakeup_stack_end, %esp + + /* Clear the EFLAGS */ + pushl $0 + popfl + + /* Check header signature... */ + movl signature, %eax + cmpl $0x51ee1111, %eax + jne bogus_real_magic + + /* Check we really have everything... */ + movl end_signature, %eax + cmpl $0x65a22c82, %eax + jne bogus_real_magic + + /* Call the C code */ + calll main + + /* Do any other stuff... */ + +#ifndef CONFIG_64BIT + /* This could also be done in C code... */ + movl pmode_cr3, %eax + movl %eax, %cr3 + + movl pmode_cr4, %ecx + jecxz 1f + movl %ecx, %cr4 +1: + movl pmode_efer, %eax + movl pmode_efer + 4, %edx + movl %eax, %ecx + orl %edx, %ecx + jz 1f + movl $0xc0000080, %ecx + wrmsr +1: + + lgdtl pmode_gdt + + /* This really couldn't... */ + movl pmode_cr0, %eax + movl %eax, %cr0 + jmp pmode_return +#else + pushw $0 + pushw trampoline_segment + pushw $0 + lret +#endif + +bogus_real_magic: +1: + hlt + jmp 1b + + .data + .balign 4 + .globl HEAP, heap_end +HEAP: + .long wakeup_heap +heap_end: + .long wakeup_stack + + .bss +wakeup_heap: + .space 2048 +wakeup_stack: + .space 2048 +wakeup_stack_end: diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h new file mode 100644 index 000000000000..ef8166fe8020 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.h @@ -0,0 +1,36 @@ +/* + * Definitions for the wakeup data structure at the head of the + * wakeup code. 
+ */ + +#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H +#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H + +#ifndef __ASSEMBLY__ +#include + +/* This must match data at wakeup.S */ +struct wakeup_header { + u16 video_mode; /* Video mode number */ + u16 _jmp1; /* ljmpl opcode, 32-bit only */ + u32 pmode_entry; /* Protected mode resume point, 32-bit only */ + u16 _jmp2; /* CS value, 32-bit only */ + u32 pmode_cr0; /* Protected mode cr0 */ + u32 pmode_cr3; /* Protected mode cr3 */ + u32 pmode_cr4; /* Protected mode cr4 */ + u32 pmode_efer_low; /* Protected mode EFER */ + u32 pmode_efer_high; + u64 pmode_gdt; + u32 realmode_flags; + u32 real_magic; + u16 trampoline_segment; /* segment with trampoline code, 64-bit only */ + u32 signature; /* To check we have correct structure */ +} __attribute__((__packed__)); + +extern struct wakeup_header wakeup_header; +#endif + +#define HEADER_OFFSET 0x3f00 +#define WAKEUP_SIZE 0x4000 + +#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S new file mode 100644 index 000000000000..22fab6c4be15 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S @@ -0,0 +1,61 @@ +/* + * wakeup.ld + * + * Linker script for the real-mode wakeup code + */ +#undef i386 +#include "wakeup.h" + +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_ARCH(i386) +ENTRY(_start) + +SECTIONS +{ + . = HEADER_OFFSET; + .header : { + *(.header) + } + + . = 0; + .text : { + *(.text*) + } + + . = ALIGN(16); + .rodata : { + *(.rodata*) + } + + .videocards : { + video_cards = .; + *(.videocards) + video_cards_end = .; + } + + . = ALIGN(16); + .data : { + *(.data*) + } + + .signature : { + end_signature = .; + LONG(0x65a22c82) + } + + . = ALIGN(16); + .bss : { + __bss_start = .; + *(.bss) + __bss_end = .; + } + + . = ALIGN(16); + _end = .; + + /DISCARD/ : { + *(.note*) + } + + . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); +} diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index dd78326ae47c..afc25ee9964b 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -10,30 +10,72 @@ #include #include -#include +#include "realmode/wakeup.h" +#include "sleep.h" -/* address in low memory of the wakeup routine. */ unsigned long acpi_wakeup_address; unsigned long acpi_realmode_flags; -extern char wakeup_start, wakeup_end; -extern unsigned long acpi_copy_wakeup_routine(unsigned long); +/* address in low memory of the wakeup routine. */ +static unsigned long acpi_realmode; + +#ifdef CONFIG_64BIT +static char temp_stack[10240]; +#endif /** * acpi_save_state_mem - save kernel state * * Create an identity mapped page table and copy the wakeup routine to * low memory. + * + * Note that this is too late to change acpi_wakeup_address. 
*/ int acpi_save_state_mem(void) { - if (!acpi_wakeup_address) { - printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n"); + struct wakeup_header *header; + + if (!acpi_realmode) { + printk(KERN_ERR "Could not allocate memory during boot, " + "S3 disabled\n"); return -ENOMEM; } - memcpy((void *)acpi_wakeup_address, &wakeup_start, - &wakeup_end - &wakeup_start); - acpi_copy_wakeup_routine(acpi_wakeup_address); + memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE); + + header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET); + if (header->signature != 0x51ee1111) { + printk(KERN_ERR "wakeup header does not match\n"); + return -EINVAL; + } + + header->video_mode = saved_video_mode; + +#ifndef CONFIG_64BIT + store_gdt((struct desc_ptr *)&header->pmode_gdt); + + header->pmode_efer_low = nx_enabled; + if (header->pmode_efer_low & 1) { + /* This is strange, why not save efer, always? */ + rdmsr(MSR_EFER, header->pmode_efer_low, + header->pmode_efer_high); + } +#endif /* !CONFIG_64BIT */ + + header->pmode_cr0 = read_cr0(); + header->pmode_cr4 = read_cr4(); + header->realmode_flags = acpi_realmode_flags; + header->real_magic = 0x12345678; + +#ifndef CONFIG_64BIT + header->pmode_entry = (u32)&wakeup_pmode_return; + header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET); + saved_magic = 0x12345678; +#else /* CONFIG_64BIT */ + header->trampoline_segment = setup_trampoline() >> 4; + init_rsp = (unsigned long)temp_stack + 4096; + initial_code = (unsigned long)wakeup_long64; + saved_magic = 0x123456789abcdef0; +#endif /* CONFIG_64BIT */ return 0; } @@ -56,15 +98,20 @@ void acpi_restore_state_mem(void) */ void __init acpi_reserve_bootmem(void) { - if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) { + if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { printk(KERN_ERR "ACPI: Wakeup code way too big, S3 disabled.\n"); return; } - acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2); - if (!acpi_wakeup_address) + acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE); + + if (!acpi_realmode) { printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); + return; + } + + acpi_wakeup_address = acpi_realmode; } diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h new file mode 100644 index 000000000000..adbcbaa6f1df --- /dev/null +++ b/arch/x86/kernel/acpi/sleep.h @@ -0,0 +1,16 @@ +/* + * Variables and functions used by the code in sleep.c + */ + +#include + +extern char wakeup_code_start, wakeup_code_end; + +extern unsigned long saved_video_mode; +extern long saved_magic; + +extern int wakeup_pmode_return; +extern char swsusp_pg_dir[PAGE_SIZE]; + +extern unsigned long acpi_copy_wakeup_routine(unsigned long); +extern void wakeup_long64(void); diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c deleted file mode 100644 index 63fe5525e026..000000000000 --- a/arch/x86/kernel/acpi/sleep_32.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * sleep.c - x86-specific ACPI sleep support. - * - * Copyright (C) 2001-2003 Patrick Mochel - * Copyright (C) 2001-2003 Pavel Machek - */ - -#include -#include -#include -#include - -#include - -/* Ouch, we want to delete this. 
We already have better version in userspace, in - s2ram from suspend.sf.net project */ -static __init int reset_videomode_after_s3(const struct dmi_system_id *d) -{ - acpi_realmode_flags |= 2; - return 0; -} - -static __initdata struct dmi_system_id acpisleep_dmi_table[] = { - { /* Reset video mode after returning from ACPI S3 sleep */ - .callback = reset_videomode_after_s3, - .ident = "Toshiba Satellite 4030cdt", - .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), - }, - }, - {} -}; - -static int __init acpisleep_dmi_init(void) -{ - dmi_check_system(acpisleep_dmi_table); - return 0; -} - -core_initcall(acpisleep_dmi_init); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index f53e3277f8e5..a12e6a9fb659 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -3,178 +3,12 @@ #include #include -# -# wakeup_code runs in real mode, and at unknown address (determined at run-time). -# Therefore it must only use relative jumps/calls. -# -# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled -# -# If physical address of wakeup_code is 0x12345, BIOS should call us with -# cs = 0x1234, eip = 0x05 -# - -#define BEEP \ - inb $97, %al; \ - outb %al, $0x80; \ - movb $3, %al; \ - outb %al, $97; \ - outb %al, $0x80; \ - movb $-74, %al; \ - outb %al, $67; \ - outb %al, $0x80; \ - movb $-119, %al; \ - outb %al, $66; \ - outb %al, $0x80; \ - movb $15, %al; \ - outb %al, $66; - -ALIGN - .align 4096 -ENTRY(wakeup_start) -wakeup_code: - wakeup_code_start = . - .code16 - - cli - cld - - # setup data segment - movw %cs, %ax - movw %ax, %ds # Make ds:0 point to wakeup_start - movw %ax, %ss - - testl $4, realmode_flags - wakeup_code - jz 1f - BEEP -1: - mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board - - pushl $0 # Kill any dangerous flags - popfl - - movl real_magic - wakeup_code, %eax - cmpl $0x12345678, %eax - jne bogus_real_magic - - testl $1, realmode_flags - wakeup_code - jz 1f - lcall $0xc000,$3 - movw %cs, %ax - movw %ax, %ds # Bios might have played with that - movw %ax, %ss -1: - - testl $2, realmode_flags - wakeup_code - jz 1f - mov video_mode - wakeup_code, %ax - call mode_set -1: - - # set up page table - movl $swsusp_pg_dir-__PAGE_OFFSET, %eax - movl %eax, %cr3 - - testl $1, real_efer_save_restore - wakeup_code - jz 4f - # restore efer setting - movl real_save_efer_edx - wakeup_code, %edx - movl real_save_efer_eax - wakeup_code, %eax - mov $0xc0000080, %ecx - wrmsr -4: - # make sure %cr4 is set correctly (features, etc) - movl real_save_cr4 - wakeup_code, %eax - movl %eax, %cr4 - - # need a gdt -- use lgdtl to force 32-bit operands, in case - # the GDT is located past 16 megabytes. - lgdtl real_save_gdt - wakeup_code - - movl real_save_cr0 - wakeup_code, %eax - movl %eax, %cr0 - jmp 1f -1: - movl real_magic - wakeup_code, %eax - cmpl $0x12345678, %eax - jne bogus_real_magic - - testl $8, realmode_flags - wakeup_code - jz 1f - BEEP -1: - ljmpl $__KERNEL_CS, $wakeup_pmode_return - -real_save_gdt: .word 0 - .long 0 -real_save_cr0: .long 0 -real_save_cr3: .long 0 -real_save_cr4: .long 0 -real_magic: .long 0 -video_mode: .long 0 -realmode_flags: .long 0 -real_efer_save_restore: .long 0 -real_save_efer_edx: .long 0 -real_save_efer_eax: .long 0 - -bogus_real_magic: - jmp bogus_real_magic - -/* This code uses an extended set of video mode numbers. 
These include: - * Aliases for standard modes - * NORMAL_VGA (-1) - * EXTENDED_VGA (-2) - * ASK_VGA (-3) - * Video modes numbered by menu position -- NOT RECOMMENDED because of lack - * of compatibility when extending the table. These are between 0x00 and 0xff. - */ -#define VIDEO_FIRST_MENU 0x0000 - -/* Standard BIOS video modes (BIOS number + 0x0100) */ -#define VIDEO_FIRST_BIOS 0x0100 - -/* VESA BIOS video modes (VESA number + 0x0200) */ -#define VIDEO_FIRST_VESA 0x0200 - -/* Video7 special modes (BIOS number + 0x0900) */ -#define VIDEO_FIRST_V7 0x0900 - -# Setting of user mode (AX=mode ID) => CF=success - -# For now, we only handle VESA modes (0x0200..0x03ff). To handle other -# modes, we should probably compile in the video code from the boot -# directory. -mode_set: - movw %ax, %bx - subb $VIDEO_FIRST_VESA>>8, %bh - cmpb $2, %bh - jb check_vesa - -setbad: - clc - ret - -check_vesa: - orw $0x4000, %bx # Use linear frame buffer - movw $0x4f02, %ax # VESA BIOS mode set call - int $0x10 - cmpw $0x004f, %ax # AL=4f if implemented - jnz setbad # AH=0 if OK - - stc - ret +# Copyright 2003, 2008 Pavel Machek , distribute under GPLv2 .code32 ALIGN -.org 0x800 -wakeup_stack_begin: # Stack grows down - -.org 0xff0 # Just below end of page -wakeup_stack: -ENTRY(wakeup_end) - -.org 0x1000 - +ENTRY(wakeup_pmode_return) wakeup_pmode_return: movw $__KERNEL_DS, %ax movw %ax, %ss @@ -187,7 +21,7 @@ wakeup_pmode_return: lgdt saved_gdt lidt saved_idt lldt saved_ldt - ljmp $(__KERNEL_CS),$1f + ljmp $(__KERNEL_CS), $1f 1: movl %cr3, %eax movl %eax, %cr3 @@ -201,82 +35,41 @@ wakeup_pmode_return: jne bogus_magic # jump to place where we left off - movl saved_eip,%eax + movl saved_eip, %eax jmp *%eax bogus_magic: jmp bogus_magic -## -# acpi_copy_wakeup_routine -# -# Copy the above routine to low memory. 
-# -# Parameters: -# %eax: place to copy wakeup routine to -# -# Returned address is location of code in low memory (past data and stack) -# -ENTRY(acpi_copy_wakeup_routine) - pushl %ebx +save_registers: sgdt saved_gdt sidt saved_idt sldt saved_ldt str saved_tss - movl nx_enabled, %edx - movl %edx, real_efer_save_restore - wakeup_start (%eax) - testl $1, real_efer_save_restore - wakeup_start (%eax) - jz 2f - # save efer setting - pushl %eax - movl %eax, %ebx - mov $0xc0000080, %ecx - rdmsr - movl %edx, real_save_efer_edx - wakeup_start (%ebx) - movl %eax, real_save_efer_eax - wakeup_start (%ebx) - popl %eax -2: - - movl %cr3, %edx - movl %edx, real_save_cr3 - wakeup_start (%eax) - movl %cr4, %edx - movl %edx, real_save_cr4 - wakeup_start (%eax) - movl %cr0, %edx - movl %edx, real_save_cr0 - wakeup_start (%eax) - sgdt real_save_gdt - wakeup_start (%eax) - - movl saved_videomode, %edx - movl %edx, video_mode - wakeup_start (%eax) - movl acpi_realmode_flags, %edx - movl %edx, realmode_flags - wakeup_start (%eax) - movl $0x12345678, real_magic - wakeup_start (%eax) - movl $0x12345678, saved_magic - popl %ebx - ret - -save_registers: leal 4(%esp), %eax movl %eax, saved_context_esp - movl %ebx, saved_context_ebx - movl %ebp, saved_context_ebp - movl %esi, saved_context_esi - movl %edi, saved_context_edi - pushfl ; popl saved_context_eflags - - movl $ret_point, saved_eip + movl %ebx, saved_context_ebx + movl %ebp, saved_context_ebp + movl %esi, saved_context_esi + movl %edi, saved_context_edi + pushfl + popl saved_context_eflags + + movl $ret_point, saved_eip ret restore_registers: - movl saved_context_ebp, %ebp - movl saved_context_ebx, %ebx - movl saved_context_esi, %esi - movl saved_context_edi, %edi - pushl saved_context_eflags ; popfl - ret + movl saved_context_ebp, %ebp + movl saved_context_ebx, %ebx + movl saved_context_esi, %esi + movl saved_context_edi, %edi + pushl saved_context_eflags + popfl + ret ENTRY(do_suspend_lowlevel) call save_processor_state diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 2e1b9e0d0767..bcc293423a70 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -7,191 +7,18 @@ #include # Copyright 2003 Pavel Machek , distribute under GPLv2 -# -# wakeup_code runs in real mode, and at unknown address (determined at run-time). -# Therefore it must only use relative jumps/calls. -# -# Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled -# -# If physical address of wakeup_code is 0x12345, BIOS should call us with -# cs = 0x1234, eip = 0x05 -# - -#define BEEP \ - inb $97, %al; \ - outb %al, $0x80; \ - movb $3, %al; \ - outb %al, $97; \ - outb %al, $0x80; \ - movb $-74, %al; \ - outb %al, $67; \ - outb %al, $0x80; \ - movb $-119, %al; \ - outb %al, $66; \ - outb %al, $0x80; \ - movb $15, %al; \ - outb %al, $66; - - -ALIGN - .align 16 -ENTRY(wakeup_start) -wakeup_code: - wakeup_code_start = . - .code16 - -# Running in *copy* of this code, somewhere in low 1MB. - - cli - cld - # setup data segment - movw %cs, %ax - movw %ax, %ds # Make ds:0 point to wakeup_start - movw %ax, %ss - - # Data segment must be set up before we can see whether to beep. 
- testl $4, realmode_flags - wakeup_code - jz 1f - BEEP -1: - - # Private stack is needed for ASUS board - mov $(wakeup_stack - wakeup_code), %sp - - pushl $0 # Kill any dangerous flags - popfl - - movl real_magic - wakeup_code, %eax - cmpl $0x12345678, %eax - jne bogus_real_magic - - testl $1, realmode_flags - wakeup_code - jz 1f - lcall $0xc000,$3 - movw %cs, %ax - movw %ax, %ds # Bios might have played with that - movw %ax, %ss -1: - - testl $2, realmode_flags - wakeup_code - jz 1f - mov video_mode - wakeup_code, %ax - call mode_set -1: - - mov %ds, %ax # Find 32bit wakeup_code addr - movzx %ax, %esi # (Convert %ds:gdt to a liner ptr) - shll $4, %esi - # Fix up the vectors - addl %esi, wakeup_32_vector - wakeup_code - addl %esi, wakeup_long64_vector - wakeup_code - addl %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer - - lidtl %ds:idt_48a - wakeup_code - lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is - # appropriate - - movl $1, %eax # protected mode (PE) bit - lmsw %ax # This is it! - jmp 1f -1: - - ljmpl *(wakeup_32_vector - wakeup_code) - - .balign 4 -wakeup_32_vector: - .long wakeup_32 - wakeup_code - .word __KERNEL32_CS, 0 - - .code32 -wakeup_32: -# Running in this code, but at low address; paging is not yet turned on. - - movl $__KERNEL_DS, %eax - movl %eax, %ds - - /* - * Prepare for entering 64bits mode - */ - - /* Enable PAE */ - xorl %eax, %eax - btsl $5, %eax - movl %eax, %cr4 - - /* Setup early boot stage 4 level pagetables */ - leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax - movl %eax, %cr3 - - /* Check if nx is implemented */ - movl $0x80000001, %eax - cpuid - movl %edx,%edi - - /* Enable Long Mode */ - xorl %eax, %eax - btsl $_EFER_LME, %eax - - /* No Execute supported? */ - btl $20,%edi - jnc 1f - btsl $_EFER_NX, %eax - - /* Make changes effective */ -1: movl $MSR_EFER, %ecx - xorl %edx, %edx - wrmsr - - xorl %eax, %eax - btsl $31, %eax /* Enable paging and in turn activate Long Mode */ - btsl $0, %eax /* Enable protected mode */ - - /* Make changes effective */ - movl %eax, %cr0 - - /* At this point: - CR4.PAE must be 1 - CS.L must be 0 - CR3 must point to PML4 - Next instruction must be a branch - This must be on identity-mapped page - */ - /* - * At this point we're in long mode but in 32bit compatibility mode - * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn - * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load - * the new gdt/idt that has __KERNEL_CS with CS.L = 1. - */ - - /* Finally jump in 64bit mode */ - ljmp *(wakeup_long64_vector - wakeup_code)(%esi) - - .balign 4 -wakeup_long64_vector: - .long wakeup_long64 - wakeup_code - .word __KERNEL_CS, 0 .code64 - - /* Hooray, we are in Long 64-bit mode (but still running in - * low memory) - */ -wakeup_long64: /* - * We must switch to a new descriptor in kernel space for the GDT - * because soon the kernel won't have access anymore to the userspace - * addresses where we're currently running on. We have to do that here - * because in 32bit we couldn't load a 64bit linear address. 
+ * Hooray, we are in Long 64-bit mode (but still running in low memory) */ - lgdt cpu_gdt_descr - - movq saved_magic, %rax - movq $0x123456789abcdef0, %rdx - cmpq %rdx, %rax - jne bogus_64_magic +ENTRY(wakeup_long64) +wakeup_long64: + movq saved_magic, %rax + movq $0x123456789abcdef0, %rdx + cmpq %rdx, %rax + jne bogus_64_magic - nop - nop movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds @@ -208,130 +35,8 @@ wakeup_long64: movq saved_rip, %rax jmp *%rax -.code32 - - .align 64 -gdta: - /* Its good to keep gdt in sync with one in trampoline.S */ - .word 0, 0, 0, 0 # dummy - /* ??? Why I need the accessed bit set in order for this to work? */ - .quad 0x00cf9b000000ffff # __KERNEL32_CS - .quad 0x00af9b000000ffff # __KERNEL_CS - .quad 0x00cf93000000ffff # __KERNEL_DS - -idt_48a: - .word 0 # idt limit = 0 - .word 0, 0 # idt base = 0L - -gdt_48a: - .word 0x800 # gdt limit=2048, - # 256 GDT entries - .long gdta - wakeup_code # gdt base (relocated in later) - -real_magic: .quad 0 -video_mode: .quad 0 -realmode_flags: .quad 0 - -.code16 -bogus_real_magic: - jmp bogus_real_magic - -.code64 bogus_64_magic: - jmp bogus_64_magic - -/* This code uses an extended set of video mode numbers. These include: - * Aliases for standard modes - * NORMAL_VGA (-1) - * EXTENDED_VGA (-2) - * ASK_VGA (-3) - * Video modes numbered by menu position -- NOT RECOMMENDED because of lack - * of compatibility when extending the table. These are between 0x00 and 0xff. - */ -#define VIDEO_FIRST_MENU 0x0000 - -/* Standard BIOS video modes (BIOS number + 0x0100) */ -#define VIDEO_FIRST_BIOS 0x0100 - -/* VESA BIOS video modes (VESA number + 0x0200) */ -#define VIDEO_FIRST_VESA 0x0200 - -/* Video7 special modes (BIOS number + 0x0900) */ -#define VIDEO_FIRST_V7 0x0900 - -# Setting of user mode (AX=mode ID) => CF=success - -# For now, we only handle VESA modes (0x0200..0x03ff). To handle other -# modes, we should probably compile in the video code from the boot -# directory. -.code16 -mode_set: - movw %ax, %bx - subb $VIDEO_FIRST_VESA>>8, %bh - cmpb $2, %bh - jb check_vesa - -setbad: - clc - ret - -check_vesa: - orw $0x4000, %bx # Use linear frame buffer - movw $0x4f02, %ax # VESA BIOS mode set call - int $0x10 - cmpw $0x004f, %ax # AL=4f if implemented - jnz setbad # AH=0 if OK - - stc - ret - -wakeup_stack_begin: # Stack grows down - -.org 0xff0 -wakeup_stack: # Just below end of page - -.org 0x1000 -ENTRY(wakeup_level4_pgt) - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE - .fill 510,8,0 - /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ - .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE - -ENTRY(wakeup_end) - -## -# acpi_copy_wakeup_routine -# -# Copy the above routine to low memory. 
-# -# Parameters: -# %rdi: place to copy wakeup routine to -# -# Returned address is location of code in low memory (past data and stack) -# - .code64 -ENTRY(acpi_copy_wakeup_routine) - pushq %rax - pushq %rdx - - movl saved_video_mode, %edx - movl %edx, video_mode - wakeup_start (,%rdi) - movl acpi_realmode_flags, %edx - movl %edx, realmode_flags - wakeup_start (,%rdi) - movq $0x12345678, real_magic - wakeup_start (,%rdi) - movq $0x123456789abcdef0, %rdx - movq %rdx, saved_magic - - movq saved_magic, %rax - movq $0x123456789abcdef0, %rdx - cmpq %rdx, %rax - jne bogus_64_magic - - # restore the regs we used - popq %rdx - popq %rax -ENTRY(do_suspend_lowlevel_s4bios) - ret + jmp bogus_64_magic .align 2 .p2align 4,,15 @@ -414,7 +119,7 @@ do_suspend_lowlevel: jmp restore_processor_state .LFE5: .Lfe5: - .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel + .size do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel .data ALIGN diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S new file mode 100644 index 000000000000..6ff3b5730575 --- /dev/null +++ b/arch/x86/kernel/acpi/wakeup_rm.S @@ -0,0 +1,10 @@ +/* + * Wrapper script for the realmode binary as a transport object + * before copying to low memory. + */ + .section ".rodata","a" + .globl wakeup_code_start, wakeup_code_end +wakeup_code_start: + .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" +wakeup_code_end: + .size wakeup_code_start, .-wakeup_code_start diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index a720f3d5ed9d..7f6c0c85c8f6 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c @@ -27,6 +27,7 @@ #include #include #include +#include struct e820map e820; @@ -58,8 +59,8 @@ struct early_res { }; static struct early_res early_res[MAX_EARLY_RES] __initdata = { { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ -#ifdef CONFIG_SMP - { SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" }, +#ifdef CONFIG_X86_TRAMPOLINE + { TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" }, #endif {} }; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index c1d7a877d814..10a1955bb1d1 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -132,10 +132,6 @@ ident_complete: addq %rbp, trampoline_level4_pgt + 0(%rip) addq %rbp, trampoline_level4_pgt + (511*8)(%rip) #endif -#ifdef CONFIG_ACPI_SLEEP - addq %rbp, wakeup_level4_pgt + 0(%rip) - addq %rbp, wakeup_level4_pgt + (511*8)(%rip) -#endif /* Due to ENTRY(), sometimes the empty space gets filled with * zeros. 
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 4b198d9d0de3..5b0bffb7fcc9 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(ist_info);
 extern void early_cpu_init(void);
 extern int root_mountflags;
 
-unsigned long saved_videomode;
+unsigned long saved_video_mode;
 
 #define RAMDISK_IMAGE_START_MASK	0x07FF
 #define RAMDISK_PROMPT_FLAG		0x8000
@@ -763,7 +763,7 @@ void __init setup_arch(char **cmdline_p)
 	edid_info = boot_params.edid_info;
 	apm_info.bios = boot_params.apm_bios_info;
 	ist_info = boot_params.ist_info;
-	saved_videomode = boot_params.hdr.vid_mode;
+	saved_video_mode = boot_params.hdr.vid_mode;
 	if( boot_params.sys_desc_table.length != 0 ) {
 		set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
 		machine_id = boot_params.sys_desc_table.table[0];
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index b80300710c08..674ef3510cdf 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -65,6 +65,7 @@
 #include
 #include
 #include
+#include <asm/trampoline.h>
 #include
 
 #ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ca3929b16049..424600e671bd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -53,6 +53,7 @@
 #include
 #include
 #include
+#include <asm/trampoline.h>
 #include
 #include
 #include
@@ -140,7 +141,7 @@ static atomic_t init_deasserted;
 static int boot_cpu_logical_apicid;
 
 /* ready for x86_64, no harm for x86, since it will overwrite after alloc */
-unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
+unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
 
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
@@ -554,8 +555,7 @@ cpumask_t cpu_coregroup_map(int cpu)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data,
 	       trampoline_end - trampoline_data);
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 2a07e67d6697..894293c598db 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -30,12 +30,7 @@
 #include
 #include
 
-/* We can free up trampoline after bootup if cpu hotplug is not supported. */
-#ifndef CONFIG_HOTPLUG_CPU
-.section .cpuinit.data, "aw", @progbits
-#else
 .section .rodata, "a", @progbits
-#endif
 
 .code16
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 4397235c2e30..be7235bf105d 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <asm/trampoline.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3496e1c299b2..62ebdec394b9 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -45,22 +45,12 @@ DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
 
-/*
- * Trampoline 80x86 program as an array.
- */
-extern const unsigned char trampoline_data [];
-extern const unsigned char trampoline_end [];
-extern unsigned char *trampoline_base;
-
 /* Static state in head.S used to set up a CPU */
 extern struct {
 	void *sp;
 	unsigned short ss;
 } stack_start;
 
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -130,9 +120,6 @@ extern void __cpu_die(unsigned int cpu);
 
 extern void prefill_possible_map(void);
 
-#define SMP_TRAMPOLINE_BASE 0x6000
-extern unsigned long setup_trampoline(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
new file mode 100644
index 000000000000..b156b08d0131
--- /dev/null
+++ b/include/asm-x86/trampoline.h
@@ -0,0 +1,21 @@
+#ifndef __TRAMPOLINE_HEADER
+#define __TRAMPOLINE_HEADER
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Trampoline 80x86 program as an array.
+ */
+extern const unsigned char trampoline_data [];
+extern const unsigned char trampoline_end [];
+extern unsigned char *trampoline_base;
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+
+#define TRAMPOLINE_BASE 0x6000
+extern unsigned long setup_trampoline(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __TRAMPOLINE_HEADER */
-- 
cgit v1.2.1


From 77ad386e596c6b0930cc2e09e3cce485e3ee7f72 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 21 Mar 2008 15:23:19 +0100
Subject: x86: standalone trampoline code

move the trampoline setup code out of smpboot.c - UP kernels can have
suspend support too.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/Makefile     |  1 +
 arch/x86/kernel/smpboot.c    | 15 ---------------
 arch/x86/kernel/trampoline.c | 18 ++++++++++++++++++
 3 files changed, 19 insertions(+), 15 deletions(-)
 create mode 100644 arch/x86/kernel/trampoline.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index fdd8395e0ed3..530ed6a4a031 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,6 +28,7 @@ obj-y			+= alternative.o i8253.o
 obj-$(CONFIG_X86_64)	+= pci-nommu_64.o bugs_64.o
 obj-y			+= tsc_$(BITS).o io_delay.o rtc.o
 
+obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= i387.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 424600e671bd..e6abe8a49b1f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -140,9 +140,6 @@ static atomic_t init_deasserted;
 
 static int boot_cpu_logical_apicid;
 
-/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
-unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
-
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -550,18 +547,6 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-unsigned long setup_trampoline(void)
-{
-	memcpy(trampoline_base, trampoline_data,
-	       trampoline_end - trampoline_data);
-	return virt_to_phys(trampoline_base);
-}
-
 #ifdef CONFIG_X86_32
 /*
  * We are called very early to get the low memory for the
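
The helper being moved keeps its contract: callers only consume the physical
address that setup_trampoline() returns, which is what the INIT/SIPI startup
sequence needs. A rough usage sketch (boot_one_cpu() and apic_send_init_sipi()
are hypothetical names for illustration, not functions added by this patch):

	#include <asm/trampoline.h>

	/* Hypothetical helper: sends INIT/SIPI with the given start vector. */
	extern void apic_send_init_sipi(int apicid, unsigned long start_eip);

	static void boot_one_cpu(int apicid)
	{
		/* Copy the 16-bit stub to TRAMPOLINE_BASE and obtain its
		 * physical address for the startup IPI. */
		unsigned long start_eip = setup_trampoline();

		apic_send_init_sipi(apicid, start_eip);
	}
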
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
new file mode 100644
index 000000000000..abbf199adebb
--- /dev/null
+++ b/arch/x86/kernel/trampoline.c
@@ -0,0 +1,18 @@
+#include <linux/io.h>
+
+#include <asm/trampoline.h>
+
+/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
+unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
+
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+unsigned long setup_trampoline(void)
+{
+	memcpy(trampoline_base, trampoline_data,
+	       trampoline_end - trampoline_data);
+	return virt_to_phys(trampoline_base);
+}
-- 
cgit v1.2.1
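
One invariant ties these patches together: the stub that setup_trampoline()
copies must fit inside the two pages that e820_64.c now reserves starting at
TRAMPOLINE_BASE, or the copy would scribble over memory the early allocator
considers free. A sanity check along these lines is an assumption of this
note, not something either patch adds:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/page.h>
	#include <asm/trampoline.h>

	static void __init check_trampoline_fits(void)
	{
		/* early_res reserves [TRAMPOLINE_BASE,
		 * TRAMPOLINE_BASE + 2 * PAGE_SIZE); the stub spans
		 * [trampoline_data, trampoline_end). */
		BUG_ON(trampoline_end - trampoline_data > 2 * PAGE_SIZE);
	}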