| author | H. Peter Anvin <hpa@linux.intel.com> | 2011-08-04 16:13:20 -0700 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-08-04 16:13:20 -0700 |
| commit | 17b0436077d99211d8b26886235a36c5ec54ac57 (patch) | |
| tree | 8bcd9b4a0f8285f749814e95ae0365c611ba2392 /arch/x86 | |
| parent | aafade242ff24fac3aabf61c7861dfa44a3c2445 (diff) | |
| parent | 02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff) | |
Merge commit 'v3.0' into x86/vdso
Diffstat (limited to 'arch/x86')
35 files changed, 328 insertions, 119 deletions
```diff
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1e41bccdcb8..c05810c0d096 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1174,7 +1174,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
-	depends on NUMA && PCI
+	depends on X86_64 && NUMA && PCI
 	---help---
 	  Enable AMD NUMA node topology detection. You should say Y here if
 	  you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 2fefa501d3ba..af60d8a2e288 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -62,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a416eca8..ffa037f28d39 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 static inline int pfn_valid(int pfn)
 {
 	int nid = pfn_to_nid(pfn);
@@ -68,6 +57,8 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
+#define early_pfn_valid(pfn)	pfn_valid((pfn))
+
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
```
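Note on the new `memblock_x86_check_reserved_size()` declaration above: its definition appears in `arch/x86/mm/memblock.c` further down, and the contract implied there is "trim `*addrp`/`*sizep` so the range avoids already-reserved regions, return true if anything changed", so callers loop until the range stabilizes. A simplified, single-region userspace model of that contract (all names local to this sketch, not kernel API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One hard-coded reserved region; the kernel walks a whole list. */
struct region { uint64_t base, size; };
static const struct region reserved = { 0x1000, 0x2000 };	/* [0x1000,0x3000) */

/* Trim *addrp/*sizep to avoid the reservation; true = range changed. */
static bool check_reserved_size(uint64_t *addrp, uint64_t *sizep)
{
	uint64_t addr = *addrp, end = addr + *sizep;
	uint64_t r_end = reserved.base + reserved.size;

	if (end <= reserved.base || addr >= r_end)
		return false;			/* no overlap */
	*addrp = r_end;				/* bump past the reservation */
	*sizep = end > r_end ? end - r_end : 0;
	return true;
}

int main(void)
{
	uint64_t addr = 0x0, size = 0x5000;
	/* same loop shape as memblock_x86_find_in_range_size() below */
	while (check_reserved_size(&addr, &size))
		;
	printf("usable: addr=%#llx size=%#llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	return 0;
}
```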
```diff
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7867c7..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)		(node_data[nid])
 
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn +	\
-				 NODE_DATA(nid)->node_spanned_pages)
-
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	u64 product;
 #ifdef __i386__
 	u32 tmp1, tmp2;
+#else
+	ulong tmp;
 #endif
 
 	if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
 	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+		"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+		: [lo]"=a"(product),
+		  [hi]"=d"(tmp)
+		: "0"(delta),
+		  [mul_frac]"rm"((u64)mul_frac));
 #else
 #error implement me!
 #endif
```
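The x86-64 branch above computes `(delta * mul_frac) >> 32`: a 64x64 multiply whose 128-bit result lives in RDX:RAX, with `shrd` extracting the middle 64 bits. A minimal portable sketch of the same fixed-point scaling, using GCC/Clang's `unsigned __int128` instead of inline asm (the `shift` handling of the real function is omitted):

```c
#include <stdint.h>
#include <stdio.h>

/* (delta * mul_frac) >> 32, computed in a 128-bit intermediate so the
 * multiply cannot overflow -- the operation pvclock_scale_delta() does
 * with mul/shrd on x86-64. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac)
{
	unsigned __int128 product = (unsigned __int128)delta * mul_frac;
	return (uint64_t)(product >> 32);
}

int main(void)
{
	/* mul_frac = 2^32/3 approximates scaling a 3 GHz TSC down to ns */
	uint64_t cycles = 3000000000ull;
	printf("%llu\n", (unsigned long long)scale_delta(cycles, 0x55555555u));
	return 0;
}
```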
```diff
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index ead21b663117..b4fd836e4053 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -28,6 +28,8 @@ pmode_cr3:	.long	0	/* Saved %cr3 */
 pmode_cr4:	.long	0	/* Saved %cr4 */
 pmode_efer:	.quad	0	/* Saved EFER */
 pmode_gdt:	.quad	0
+pmode_misc_en:	.quad	0	/* Saved MISC_ENABLE MSR */
+pmode_behavior:	.long	0	/* Wakeup behavior flags */
 realmode_flags:	.long	0
 real_magic:	.long	0
 trampoline_segment:	.word 0
@@ -91,6 +93,18 @@ wakeup_code:
 	/* Call the C code */
 	calll	main
 
+	/* Restore MISC_ENABLE before entering protected mode, in case
+	   BIOS decided to clear XD_DISABLE during S3. */
+	movl	pmode_behavior, %eax
+	btl	$WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+	jnc	1f
+
+	movl	pmode_misc_en, %eax
+	movl	pmode_misc_en + 4, %edx
+	movl	$MSR_IA32_MISC_ENABLE, %ecx
+	wrmsr
+1:
+
 	/* Do any other stuff... */
 
 #ifndef CONFIG_64BIT
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index e1828c07e79c..97a29e1430e3 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -21,6 +21,9 @@ struct wakeup_header {
 	u32 pmode_efer_low;	/* Protected mode EFER */
 	u32 pmode_efer_high;
 	u64 pmode_gdt;
+	u32 pmode_misc_en_low;	/* Protected mode MISC_ENABLE */
+	u32 pmode_misc_en_high;
+	u32 pmode_behavior;	/* Wakeup routine behavior flags */
 	u32 realmode_flags;
 	u32 real_magic;
 	u16 trampoline_segment;	/* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
 #define WAKEUP_HEADER_SIGNATURE 0x51ee1111
 #define WAKEUP_END_SIGNATURE	0x65a22c82
 
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE	0
+
 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 18a857ba7a25..103b6ab368d3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
 
 	header->pmode_cr0 = read_cr0();
 	header->pmode_cr4 = read_cr4_safe();
+	header->pmode_behavior = 0;
+	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+			&header->pmode_misc_en_low,
+			&header->pmode_misc_en_high))
+		header->pmode_behavior |=
+			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
 	header->realmode_flags = acpi_realmode_flags;
 	header->real_magic = 0x12345678;
```
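The three files above implement one handshake: the suspend path records MISC_ENABLE only if `rdmsr_safe()` succeeds, and sets a behavior bit that the real-mode wakeup stub tests before issuing `wrmsr`. A runnable userspace model of that flag protocol (all names local to the sketch; `rdmsr_model()` stands in for `rdmsr_safe()`):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESTORE_MISC_ENABLE 0	/* behavior bit number */

struct wakeup_hdr {
	uint64_t pmode_misc_en;
	uint32_t pmode_behavior;
};

/* stand-in for rdmsr_safe(): false would mean the read faulted */
static bool rdmsr_model(uint64_t *val)
{
	*val = 0x850089;	/* arbitrary MISC_ENABLE-like value */
	return true;
}

static void suspend_save(struct wakeup_hdr *h)
{
	h->pmode_behavior = 0;
	if (rdmsr_model(&h->pmode_misc_en))
		h->pmode_behavior |= 1u << RESTORE_MISC_ENABLE;
}

static void wakeup_restore(const struct wakeup_hdr *h)
{
	/* only restore what was provably saved */
	if (h->pmode_behavior & (1u << RESTORE_MISC_ENABLE))
		printf("wrmsr(MISC_ENABLE, 0x%llx)\n",
		       (unsigned long long)h->pmode_misc_en);
}

int main(void)
{
	struct wakeup_hdr h;
	suspend_save(&h);
	wakeup_restore(&h);
	return 0;
}
```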
```diff
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/dma.h>
 #include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
 		pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
 		if (pdev)
 			dev_data->alias = &pdev->dev;
+		else {
+			kfree(dev_data);
+			return -ENOTSUPP;
+		}
 	}
 
 	atomic_set(&dev_data->bind, 0);
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
 	return 0;
 }
 
+static void iommu_ignore_device(struct device *dev)
+{
+	u16 devid, alias;
+
+	devid = get_device_id(dev);
+	alias = amd_iommu_alias_table[devid];
+
+	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+	amd_iommu_rlookup_table[devid] = NULL;
+	amd_iommu_rlookup_table[alias] = NULL;
+}
+
 static void iommu_uninit_device(struct device *dev)
 {
 	kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
 			continue;
 
 		ret = iommu_init_device(&pdev->dev);
-		if (ret)
+		if (ret == -ENOTSUPP)
+			iommu_ignore_device(&pdev->dev);
+		else if (ret)
 			goto out_free;
 	}
 
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 	.dma_supported = amd_iommu_dma_supported,
 };
 
+static unsigned device_dma_ops_init(void)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned unhandled = 0;
+
+	for_each_pci_dev(pdev) {
+		if (!check_device(&pdev->dev)) {
+			unhandled += 1;
+			continue;
+		}
+
+		pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+	}
+
+	return unhandled;
+}
+
 /*
  * The function which clues the AMD IOMMU driver into dma_ops.
  */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
 	struct amd_iommu *iommu;
-	int ret;
+	int ret, unhandled;
 
 	/*
 	 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
 	swiotlb = 0;
 
 	/* Make the driver finally visible to the drivers */
-	dma_ops = &amd_iommu_dma_ops;
+	unhandled = device_dma_ops_init();
+	if (unhandled && max_pfn > MAX_DMA32_PFN) {
+		/* There are unhandled devices - initialize swiotlb for them */
+		swiotlb = 1;
+	}
 
 	amd_iommu_stats_init();
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 {
 	u8 *p = (u8 *)h;
 	u8 *end = p, flags = 0;
-	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
-	u32 ext_flags = 0;
+	u16 devid = 0, devid_start = 0, devid_to = 0;
+	u32 dev_i, ext_flags = 0;
 	bool alias = false;
 	struct ivhd_entry *e;
 
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 /* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
-	u16 i;
+	u32 i;
 
 	for (i = iommu->first_device; i <= iommu->last_device; ++i)
 		set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  */
 static void init_device_table(void)
 {
-	u16 devid;
+	u32 devid;
 
 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
```
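Why the loop counters above were widened from `u16` to `u32`: a BDF device id can be as large as 0xffff, and with a 16-bit counter `i <= last` can never become false once `last == 0xffff`, because incrementing past 0xffff wraps back to 0. A minimal standalone demonstration (not the kernel code itself):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t last = 0xffff;
	uint32_t i, iterations = 0;

	/* correct: a 32-bit counter terminates after 0x10000 iterations */
	for (i = 0; i <= last; ++i)
		iterations++;
	printf("u32 counter: %u iterations\n", iterations);

	/* broken variant (do not run unbounded):
	 *   uint16_t j;
	 *   for (j = 0; j <= last; ++j) ...
	 * j <= 0xffff is always true for a uint16_t, so the loop never ends. */
	return 0;
}
```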
```diff
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 
 /*
  * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
  */
 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
 int uv_set_vga_state(struct pci_dev *pdev, bool decode,
-		     unsigned int command_bits, bool change_bridge)
+		     unsigned int command_bits, u32 flags)
 {
 	int domain, bus, rc;
 
-	PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
-			pdev->devfn, decode, command_bits, change_bridge);
+	PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+			pdev->devfn, decode, command_bits, flags);
 
-	if (!change_bridge)
+	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
 		return 0;
 
 	if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
 void __init add_dtb(u64 data)
 {
 	initial_dtb = data + offsetof(struct setup_data, data);
```
```diff
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d45a2d..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(pm_idle);
 #endif
 
@@ -399,7 +399,7 @@ void default_idle(void)
 		cpu_relax();
 	}
 }
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
 #endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
 	set_user_gs(regs, 0);
 	regs->fs		= 0;
-	set_fs(USER_DS);
 	regs->ds		= __USER_DS;
 	regs->es		= __USER_DS;
 	regs->ss		= __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	regs->cs		= _cs;
 	regs->ss		= _ss;
 	regs->flags		= X86_EFLAGS_IF;
-	set_fs(USER_DS);
 	/*
 	 * Free the old FP and other extended state
 	 */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0c016f727695..9242436e9937 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
 		},
 	},
+	{	/* Handle reboot issue on Acer Aspire one */
+		.callback = set_bios_reboot,
+		.ident = "Acer Aspire One A110",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+		},
+	},
 	{ }
 };
 
@@ -411,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E6320. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E6320",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+		},
+	},
+	{	/* Handle problems with rebooting on the Latitude E5420. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E5420",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+		},
+	},
+	{	/* Handle problems with rebooting on the Latitude E6420. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E6420",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+		},
+	},
 	{ }
 };
```
```diff
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 * to achieve.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
```
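The added loop is a classic "spin until the bring-up side publishes a flag" pattern. A runnable userspace analogue (names local to the sketch; `sched_yield()` stands in for `cpu_relax()`, an acquire/release flag for `cpu_active_mask`); compile with `-pthread`:

```c
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool active;	/* models this cpu's bit in cpu_active_mask */

static void *secondary(void *arg)
{
	(void)arg;
	/* busy-wait until the booting thread marks us active */
	while (!atomic_load_explicit(&active, memory_order_acquire))
		sched_yield();	/* stand-in for cpu_relax() */
	puts("secondary: active, enabling interrupts");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, secondary, NULL);
	/* ... bring-up work on the boot side happens here ... */
	atomic_store_explicit(&active, true, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}
```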
```diff
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..adc98675cda0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
 #define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
 #define DstMem64    (6<<1)	/* 64bit memory operand */
 #define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
-#define DstMask     (7<<1)
+#define DstDX       (8<<1)	/* Destination is in DX register */
+#define DstMask     (0xf<<1)
 /* Source operand type. */
-#define SrcNone     (0<<4)	/* No source operand. */
-#define SrcReg      (1<<4)	/* Register operand. */
-#define SrcMem      (2<<4)	/* Memory operand. */
-#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
-#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
-#define SrcImm      (5<<4)	/* Immediate operand. */
-#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
-#define SrcOne      (7<<4)	/* Implied '1' */
-#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
-#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
-#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
-#define SrcAcc      (0xd<<4)	/* Source Accumulator */
-#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
-#define SrcMask     (0xf<<4)
+#define SrcNone     (0<<5)	/* No source operand. */
+#define SrcReg      (1<<5)	/* Register operand. */
+#define SrcMem      (2<<5)	/* Memory operand. */
+#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
+#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
+#define SrcImm      (5<<5)	/* Immediate operand. */
+#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
+#define SrcOne      (7<<5)	/* Implied '1' */
+#define SrcImmUByte (8<<5)	/* 8-bit unsigned immediate operand. */
+#define SrcImmU     (9<<5)	/* Immediate operand, unsigned */
+#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
+#define SrcAcc      (0xd<<5)	/* Source Accumulator */
+#define SrcImmU16   (0xe<<5)	/* Immediate operand, unsigned, 16 bits */
+#define SrcDX       (0xf<<5)	/* Source is in DX register */
+#define SrcMask     (0xf<<5)
 /* Generic ModRM decode. */
-#define ModRM       (1<<8)
+#define ModRM       (1<<9)
 /* Destination is only written; never read. */
-#define Mov         (1<<9)
-#define BitOp       (1<<10)
-#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
-#define String      (1<<12)	/* String instruction (rep capable) */
-#define Stack       (1<<13)	/* Stack instruction (push/pop) */
-#define GroupMask   (7<<14)	/* Opcode uses one of the group mechanisms */
-#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual   (2<<14)	/* Alternate decoding of mod == 3 */
-#define Prefix      (3<<14)	/* Instruction varies with 66/f2/f3 prefix */
-#define RMExt       (4<<14)	/* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse         (1<<17)	/* SSE Vector instruction */
+#define Mov         (1<<10)
+#define BitOp       (1<<11)
+#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
+#define String      (1<<13)	/* String instruction (rep capable) */
+#define Stack       (1<<14)	/* Stack instruction (push/pop) */
+#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
+#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
+#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
+#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse         (1<<18)	/* SSE Vector instruction */
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
 	I(SrcImmByte | Mov | Stack, em_push),
 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
 	/* 0x70 - 0x7F */
 	X16(D(SrcImmByte)),
 	/* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
 	/* 0xE8 - 0xEF */
 	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
 	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
-	D2bvIP(SrcNone | DstAcc, in, check_perm_in),
-	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+	D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
 	/* 0xF0 - 0xF7 */
 	N, DI(ImplicitOps, icebp), N, N,
 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3370,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
 	bool op_prefix = false;
 	struct opcode opcode;
-	struct operand memop = { .type = OP_NONE };
+	struct operand memop = { .type = OP_NONE }, *memopp = NULL;
 
 	c->eip = ctxt->eip;
 	c->fetch.start = c->eip;
@@ -3545,9 +3547,6 @@ done_prefixes:
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
 		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
-	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem.ea += c->eip;
-
 	/*
 	 * Decode and fetch the source operand: register, memory
 	 * or immediate.
```
```diff
 	 */
@@ -3569,6 +3568,7 @@ done_prefixes:
 			c->op_bytes;
 	srcmem_common:
 		c->src = memop;
+		memopp = &c->src;
 		break;
 	case SrcImmU16:
 		rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3613,6 +3613,12 @@ done_prefixes:
 		memop.bytes = c->op_bytes + 2;
 		goto srcmem_common;
 		break;
+	case SrcDX:
+		c->src.type = OP_REG;
+		c->src.bytes = 2;
+		c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->src);
+		break;
 	}
 
 	if (rc != X86EMUL_CONTINUE)
@@ -3659,6 +3665,7 @@ done_prefixes:
 	case DstMem:
 	case DstMem64:
 		c->dst = memop;
+		memopp = &c->dst;
 		if ((c->d & DstMask) == DstMem64)
 			c->dst.bytes = 8;
 		else
@@ -3682,14 +3689,23 @@ done_prefixes:
 		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
+	case DstDX:
+		c->dst.type = OP_REG;
+		c->dst.bytes = 2;
+		c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+		fetch_register_operand(&c->dst);
+		break;
 	case ImplicitOps:
 		/* Special instructions do their own operand decoding. */
 	default:
 		c->dst.type = OP_NONE; /* Disable writeback. */
-		return 0;
+		break;
 	}
 
 done:
+	if (memopp && memopp->type == OP_MEM && c->rip_relative)
+		memopp->addr.mem.ea += c->eip;
+
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
@@ -4027,7 +4043,6 @@ special_insn:
 		break;
 	case 0xec: /* in al,dx */
 	case 0xed: /* in (e/r)ax,dx */
-		c->src.val = c->regs[VCPU_REGS_RDX];
 	do_io_in:
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
 				     &c->dst.val))
@@ -4035,7 +4050,6 @@ special_insn:
 		break;
 	case 0xee: /* out dx,al */
 	case 0xef: /* out dx,(e/r)ax */
-		c->dst.val = c->regs[VCPU_REGS_RDX];
 	do_io_out:
 		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
 				      &c->src.val, 1);
```
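The flag renumbering in the emulate.c hunks follows from one change: `DstMask` grew from 3 bits to 4 to make room for `DstDX`, so every `Src*` value and every later flag had to shift up one bit. A standalone sketch of this bitfield-packing scheme, with the non-overlap invariants made explicit (values illustrative, mirroring the new layout):

```c
#include <assert.h>
#include <stdio.h>

#define DST_SHIFT 1			/* Dst field: bits 1-4 */
#define SRC_SHIFT 5			/* Src field: bits 5-8 */

#define DstMask (0xf << DST_SHIFT)
#define DstDX   (8   << DST_SHIFT)	/* needs the 4th Dst bit */
#define SrcMask (0xf << SRC_SHIFT)
#define SrcDX   (0xf << SRC_SHIFT)
#define ModRM   (1   << 9)		/* first flag after the Src field */

int main(void)
{
	/* the packed fields must not overlap each other or later flags */
	assert((DstMask & SrcMask) == 0);
	assert((SrcMask & ModRM) == 0);

	unsigned d = DstDX | SrcDX | ModRM;	/* a decode descriptor */
	printf("dst=%u src=%u modrm=%u\n",
	       (d & DstMask) >> DST_SHIFT,
	       (d & SrcMask) >> SRC_SHIFT,
	       !!(d & ModRM));
	return 0;
}
```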
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    gva_t addr, u32 access)
 {
 	pt_element_t pte;
-	pt_element_t __user *ptep_user;
+	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	vmx_decache_cr3(vcpu);
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE	(1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
 	if (is_uv_system()) {
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		if (addr >= ei_last)
 			continue;
 		*sizep = ei_last - addr;
-		while (check_with_memblock_reserved_size(&addr, sizep, align))
+		while (memblock_x86_check_reserved_size(&addr, sizep, align))
 			;
 
 		if (*sizep)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
 	get_online_cpus();
-	on_each_cpu(nmi_cpu_start, NULL, 1);
 	ctr_running = 1;
+	/* make ctr_running visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_start, NULL, 1);
 	put_online_cpus();
 	return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 
 	nmi_enabled = 0;
 	ctr_running = 0;
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 
 	err = register_die_notifier(&profile_exceptions_nb);
 	if (err)
 		goto fail;
 
 	get_online_cpus();
 	register_cpu_notifier(&oprofile_cpu_nb);
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
+	/* make nmi_enabled visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	put_online_cpus();
 
 	return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
 	nmi_enabled = 0;
 	ctr_running = 0;
 	put_online_cpus();
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 	unregister_die_notifier(&profile_exceptions_nb);
 	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
```
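The nmi_int.c reordering is a publish pattern: the flag must be globally visible *before* the cross-CPU call that makes handlers start consulting it, so the plain store is followed by a full memory barrier (`smp_mb()` in the kernel; a compiler-only `barrier()` was insufficient). A runnable userspace analogue using C11 fences (names local to the sketch; compile with `-pthread`):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int ctr_running;		/* plain data, published via a fence */
static atomic_int kicked;	/* models the on_each_cpu() kick */

static void *nmi_handler(void *arg)
{
	(void)arg;
	while (!atomic_load(&kicked))
		;	/* wait for the kick */
	atomic_thread_fence(memory_order_seq_cst);
	printf("handler sees ctr_running=%d\n", ctr_running);	/* always 1 */
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, nmi_handler, NULL);

	ctr_running = 1;
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	atomic_store(&kicked, 1);			/* on_each_cpu() */

	pthread_join(t, NULL);
	return 0;
}
```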
```diff
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
 	return 0;
 }
 
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * setup the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset. This updates then
+ * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
+ * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
+ * amd_cpu_shutdown() using the new offset.
+ */
 static int force_ibs_eilvt_setup(void)
 {
 	int offset;
 	int ret;
 
-	/*
-	 * find the next free available EILVT entry, skip offset 0,
-	 * pin search to this cpu
-	 */
 	preempt_disable();
+	/* find the next free available EILVT entry, skip offset 0 */
 	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
 		if (get_eilvt(offset)) {
 			break;
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
```
```diff
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315c3860..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
 	return false;
 }
 
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
 	int i, j;
 	struct resource *res1, *res2;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 8214724ce54d..f567965c0620 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -327,7 +327,7 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
 {
 	int rc, pirq, irq = -1;
 	struct physdev_map_pirq map_irq;
@@ -344,16 +344,18 @@ static int xen_register_pirq(u32 gsi, int triggering)
 		shareable = 1;
 		name = "ioapic-level";
 	}
-
 	pirq = xen_allocate_pirq_gsi(gsi);
 	if (pirq < 0)
 		goto out;
 
-	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
+	if (gsi_override >= 0)
+		irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
+	else
+		irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
 	if (irq < 0)
 		goto out;
 
-	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
 
 	map_irq.domid = DOMID_SELF;
 	map_irq.type = MAP_PIRQ_TYPE_GSI;
@@ -370,7 +372,7 @@ out:
 	return irq;
 }
 
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
 {
 	int rc, irq;
 	struct physdev_setup_gsi setup_gsi;
@@ -381,7 +383,7 @@ static int xen_register_gsi(u32 gsi, int triggering, int polarity)
 	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
 			gsi, triggering, polarity);
 
-	irq = xen_register_pirq(gsi, triggering);
+	irq = xen_register_pirq(gsi, gsi_override, triggering);
 
 	setup_gsi.gsi = gsi;
 	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -403,6 +405,8 @@ static __init void xen_setup_acpi_sci(void)
 	int rc;
 	int trigger, polarity;
 	int gsi = acpi_sci_override_gsi;
+	int irq = -1;
+	int gsi_override = -1;
 
 	if (!gsi)
 		return;
@@ -419,7 +423,25 @@ static __init void xen_setup_acpi_sci(void)
 	printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
 			"polarity=%d\n", gsi, trigger, polarity);
 
-	gsi = xen_register_gsi(gsi, trigger, polarity);
+	/* Before we bind the GSI to a Linux IRQ, check whether
+	 * we need to override it with bus_irq (IRQ) value. Usually for
+	 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+	 * but there are oddballs where the IRQ != GSI:
+	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+	 * which ends up being: gsi_to_irq[9] == 20
+	 * (which is what acpi_gsi_to_irq ends up calling when starting the
+	 * the ACPI interpreter and keels over since IRQ 9 has not been
+	 * setup as we had setup IRQ 20 for it).
+	 */
+	/* Check whether the GSI != IRQ */
+	if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+		if (irq >= 0 && irq != gsi)
+			/* Bugger, we MUST have that IRQ. */
+			gsi_override = irq;
+	}
+
+	gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
 	printk(KERN_INFO "xen: acpi sci %d\n", gsi);
 
 	return;
@@ -428,7 +450,7 @@ static __init void xen_setup_acpi_sci(void)
 static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
 				 int trigger, int polarity)
 {
-	return xen_register_gsi(gsi, trigger, polarity);
+	return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
 }
 
 static int __init pci_xen_initial_domain(void)
@@ -467,7 +489,7 @@ void __init xen_setup_pirqs(void)
 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
 			continue;
 
-		xen_register_pirq(irq,
+		xen_register_pirq(irq, -1 /* no GSI override */,
 			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
 	}
 }
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..899e393d8e73 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		efi_memory_desc_t *md = p;
-		unsigned long long start = md->phys_addr;
-		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+		u64 start = md->phys_addr;
+		u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
 		if (md->type != EFI_BOOT_SERVICES_CODE &&
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
-
-		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+		/* Only reserve where possible:
+		 * - Not within any already allocated areas
+		 * - Not over any memory area (really needed, if above?)
+		 * - Not within any part of the kernel
+		 * - Not the bios reserved area
+		 */
+		if ((start+size >= virt_to_phys(_text)
+				&& start <= virt_to_phys(_end)) ||
+			!e820_all_mapped(start, start+size, E820_RAM) ||
+			memblock_x86_check_reserved_size(&start, &size,
+							1<<EFI_PAGE_SHIFT)) {
+			/* Could not reserve, skip it */
+			md->num_pages = 0;
+			memblock_dbg(PFX "Could not reserve boot range "
+					"[0x%010llx-0x%010llx]\n",
+						start, start+size-1);
+		} else
+			memblock_x86_reserve_range(start, start+size,
+							"EFI Boot");
 	}
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
 
+		/* Could not reserve boot area */
+		if (!size)
+			continue;
+
 		free_bootmem_late(start, size);
 	}
 }
@@ -483,9 +504,6 @@ void __init efi_init(void)
 	x86_platform.set_wallclock = efi_set_rtc_mmss;
 #endif
 
-	/* Setup for EFI runtime service */
-	reboot_type = BOOT_EFI;
-
 #if EFI_DEBUG
 	print_efi_memmap();
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
 	xen_reboot(SHUTDOWN_poweroff);
 }
 
+static void xen_machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+	xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
 	xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
 	.restart = xen_restart,
 	.halt = xen_machine_halt,
-	.power_off = xen_machine_halt,
+	.power_off = xen_machine_power_off,
 	.shutdown = xen_machine_halt,
 	.crash_shutdown = xen_crash_shutdown,
 	.emergency_restart = xen_emergency_restart,
```
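The enlighten.c change makes the power-off hook chain through an optional registered callback (`pm_power_off` may be NULL) before doing its own shutdown. A runnable userspace model of that callback-chaining shape (all names local to the sketch):

```c
#include <stdio.h>

/* optional platform callback; NULL until something registers one */
static void (*pm_power_off)(void);

static void acpi_style_poweroff(void) { puts("platform callback ran"); }

static void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();		/* run the registered hook first */
	puts("hypercall: SHUTDOWN_poweroff");
}

int main(void)
{
	machine_power_off();		/* no callback registered yet */
	pm_power_off = acpi_style_poweroff;
	machine_power_off();		/* callback chained before shutdown */
	return 0;
}
```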
```diff
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..0ccccb67a993 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
 #include <asm/page.h>
 #include <asm/init.h>
 #include <asm/pat.h>
+#include <asm/smp.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
+#ifdef CONFIG_SMP
+		DECLARE_BITMAP(mask, num_processors);
+#else
 		DECLARE_BITMAP(mask, NR_CPUS);
+#endif
 	} *args;
 	struct multicall_space mcs;
 
@@ -1599,6 +1604,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
+#ifdef CONFIG_X86_32
+			if (pfn > max_pfn_mapped)
+				max_pfn_mapped = pfn;
+#endif
+
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
@@ -1766,7 +1776,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+				  xen_start_info->nr_pt_frames * PAGE_SIZE +
+				  512*1024);
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7c290b..1b2b73ff0a6e 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
 	unsigned argidx = roundup(b->argidx, sizeof(u64));
 
 	BUG_ON(preemptible());
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 
 	if (b->mcidx == MC_BATCH ||
-	    (argidx + args) > MC_ARGS) {
+	    (argidx + args) >= MC_ARGS) {
 		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
 		xen_mc_flush();
 		argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
 	ret.args = &b->args[argidx];
 	b->argidx = argidx + args;
 
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 	return ret;
 }
 
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
 	struct multicall_space ret = { NULL, NULL };
 
 	BUG_ON(preemptible());
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 
 	if (b->mcidx == 0)
 		return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
 	if (b->entries[b->mcidx - 1].op != op)
 		return ret;
 
-	if ((b->argidx + size) > MC_ARGS)
+	if ((b->argidx + size) >= MC_ARGS)
 		return ret;
 
 	ret.mc = &b->entries[b->mcidx - 1];
 	ret.args = &b->args[b->argidx];
 	b->argidx += size;
 
-	BUG_ON(b->argidx > MC_ARGS);
+	BUG_ON(b->argidx >= MC_ARGS);
 	return ret;
 }
```
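The multicalls.c change tightens every `>` to `>=`: the argument buffer has `MC_ARGS` slots, so `MC_ARGS` is one past the last valid index, and a completely full buffer is now treated as a flush condition rather than letting the index reach the boundary. A compact runnable model of that bound check (names and sizes local to the sketch):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MC_ARGS 8	/* args[] has MC_ARGS slots: valid indices 0..7 */

static unsigned long args[MC_ARGS];
static size_t argidx;

/* reserve n slots, flushing first if they would not safely fit */
static unsigned long *mc_reserve(size_t n)
{
	if (argidx + n >= MC_ARGS) {	/* was `>` before the fix */
		printf("flush at argidx=%zu\n", argidx);
		argidx = 0;		/* models xen_mc_flush() */
	}
	assert(argidx + n <= MC_ARGS);	/* never hand out past the end */
	unsigned long *p = &args[argidx];
	argidx += n;
	return p;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		mc_reserve(3);
	return 0;
}
```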
```diff
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
 
 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
-#ifdef CONFIG_X86_32
 	xen_extra_mem_start = mem_end;
-#else
-	xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;
 
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
 		if (map[i].size > 0)
 			e820_add_region(map[i].addr, map[i].size, map[i].type);
 	}
+	/* Align the balloon area so that max_low_pfn does not get set
+	 * to be at the _end_ of the PCI gap at the far end (fee01000).
+	 * Note that xen_extra_mem_start gets set in the loop above to be
+	 * past the last E820 region. */
+	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+		xen_extra_mem_start = (1ULL<<32);
 
 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
+	unsigned int i;
 
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+	}
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
```