Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                  |   2
-rw-r--r--  arch/i386/boot/video.S             |  14
-rw-r--r--  arch/i386/defconfig                |  43
-rw-r--r--  arch/i386/kernel/acpi/boot.c       |  23
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c |   7
-rw-r--r--  arch/i386/kernel/apic.c            | 117
-rw-r--r--  arch/i386/kernel/cpu/amd.c         |  34
-rw-r--r--  arch/i386/kernel/hpet.c            |  70
-rw-r--r--  arch/i386/kernel/i386_ksyms.c      |   2
-rw-r--r--  arch/i386/kernel/i8253.c           |  10
-rw-r--r--  arch/i386/kernel/io_apic.c         |   2
-rw-r--r--  arch/i386/kernel/microcode.c       |  71
-rw-r--r--  arch/i386/kernel/nmi.c             | 137
-rw-r--r--  arch/i386/kernel/smpboot.c         |  16
-rw-r--r--  arch/i386/kernel/tsc.c             |   9
-rw-r--r--  arch/i386/kernel/vmi.c             |  40
-rw-r--r--  arch/i386/lib/usercopy.c           |   9
-rw-r--r--  arch/i386/mm/highmem.c             |   2
-rw-r--r--  arch/i386/pci/common.c             |  14
19 files changed, 486 insertions(+), 136 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 27e8453274e6..53d62373a524 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -220,7 +220,7 @@ config PARAVIRT
 
 config VMI
 	bool "VMI Paravirt-ops support"
-	depends on PARAVIRT
+	depends on PARAVIRT && !COMPAT_VDSO
 	help
 	  VMI provides a paravirtualized interface to the VMware ESX server
 	  (it could be used by other hypervisors in theory too, but is not
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 2c5b5cc55f79..8143c9516cb4 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -571,6 +571,16 @@ setr1:	lodsw
 	jmp	_m_s
 
 check_vesa:
+#ifdef CONFIG_FIRMWARE_EDID
+	leaw	modelist+1024, %di
+	movw	$0x4f00, %ax
+	int	$0x10
+	cmpw	$0x004f, %ax
+	jnz	setbad
+
+	movw	4(%di), %ax
+	movw	%ax, vbe_version
+#endif
 	leaw	modelist+1024, %di
 	subb	$VIDEO_FIRST_VESA>>8, %bh
 	movw	%bx, %cx			# Get mode information structure
@@ -1945,6 +1955,9 @@ store_edid:
 	rep
 	stosl
 
+	cmpw	$0x0200, vbe_version		# only do EDID on >= VBE2.0
+	jl	no_edid
+
 	pushw	%es				# save ES
 	xorw	%di, %di			# Report Capability
 	pushw	%di
@@ -1987,6 +2000,7 @@ do_restore:	.byte	0	# Screen contents altered during mode change
 svga_prefix:	.byte	VIDEO_FIRST_BIOS>>8	# Default prefix for BIOS modes
 graphic_mode:	.byte	0	# Graphic mode with a linear frame buffer
 dac_size:	.byte	6	# DAC bit depth
+vbe_version:	.word	0	# VBE bios version
 
 # Status messages
 keymsg:		.ascii	"Press <RETURN> to see video modes available, "
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 5ae1e0bc8fd7..f4efd66e1ee5 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,10 +1,13 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20-git8
-# Tue Feb 13 11:25:18 2007
+# Linux kernel version: 2.6.21-rc3
+# Wed Mar 7 15:29:47 2007
 #
 CONFIG_X86_32=y
 CONFIG_GENERIC_TIME=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_SEMAPHORE_SLEEPERS=y
@@ -34,6 +37,7 @@ CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 # CONFIG_IPC_NS is not set
+CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
@@ -44,6 +48,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_CPUSETS is not set
 CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
@@ -103,6 +108,9 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
 #
 # Processor type and features
 #
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_SMP=y
 # CONFIG_X86_PC is not set
 # CONFIG_X86_ELAN is not set
@@ -235,10 +243,8 @@ CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_AC=y
 CONFIG_ACPI_BATTERY=y
 CONFIG_ACPI_BUTTON=y
-# CONFIG_ACPI_HOTKEY is not set
 CONFIG_ACPI_FAN=y
 # CONFIG_ACPI_DOCK is not set
-# CONFIG_ACPI_BAY is not set
 CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_THERMAL=y
 # CONFIG_ACPI_ASUS is not set
@@ -289,6 +295,7 @@ CONFIG_X86_POWERNOW_K8_ACPI=y
 # CONFIG_X86_CPUFREQ_NFORCE2 is not set
 # CONFIG_X86_LONGRUN is not set
 # CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
 
 #
 # shared options
 #
@@ -368,7 +375,7 @@ CONFIG_IP_PNP_DHCP=y
 # CONFIG_INET_ESP is not set
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
 CONFIG_INET_XFRM_MODE_TRANSPORT=y
 CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
@@ -470,7 +477,13 @@
 CONFIG_FW_LOADER=y
 
 #
 # Plug and Play support
 #
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
 
 #
 # Block devices
@@ -490,7 +503,6 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
 CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
@@ -500,6 +512,7 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_IBM_ASM is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
+# CONFIG_SONY_LAPTOP is not set
 
 #
 # ATA/ATAPI/MFM/RLL support
@@ -526,6 +539,7 @@ CONFIG_BLK_DEV_IDEACPI=y
 #
 CONFIG_IDE_GENERIC=y
 # CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
 CONFIG_BLK_DEV_IDEPCI=y
 # CONFIG_IDEPCI_SHARE_IRQ is not set
 # CONFIG_BLK_DEV_OFFBOARD is not set
@@ -679,6 +693,7 @@ CONFIG_SATA_VIA=y
 # CONFIG_SATA_VITESSE is not set
 # CONFIG_SATA_INIC162X is not set
 CONFIG_SATA_INTEL_COMBINED=y
+CONFIG_SATA_ACPI=y
 # CONFIG_PATA_ALI is not set
 # CONFIG_PATA_AMD is not set
 # CONFIG_PATA_ARTOP is not set
@@ -786,6 +801,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_BONDING is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
+# CONFIG_NET_SB1000 is not set
 
 #
 # ARCnet devices
@@ -979,6 +995,7 @@ CONFIG_HW_CONSOLE=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_PNP=y
 CONFIG_SERIAL_8250_NR_UARTS=4
 CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 # CONFIG_SERIAL_8250_EXTENDED is not set
@@ -1065,6 +1082,11 @@ CONFIG_HANGCHECK_TIMER=y
 # CONFIG_HWMON_VID is not set
 
 #
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
 # Multimedia devices
 #
 # CONFIG_VIDEO_DEV is not set
@@ -1078,7 +1100,7 @@ CONFIG_HANGCHECK_TIMER=y
 #
 # Graphics support
 #
-CONFIG_FIRMWARE_EDID=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 # CONFIG_FB is not set
 
 #
@@ -1089,7 +1111,6 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128
 CONFIG_VIDEO_SELECT=y
 CONFIG_DUMMY_CONSOLE=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
 # Sound
@@ -1238,6 +1259,7 @@ CONFIG_USB_MON=y
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
@@ -1248,6 +1270,7 @@ CONFIG_USB_MON=y
 # CONFIG_USB_SISUSBVGA is not set
 # CONFIG_USB_LD is not set
 # CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 
 #
@@ -1506,6 +1529,7 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
 # CONFIG_DEBUG_SLAB is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
@@ -1525,6 +1549,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_FORCED_INLINING is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
 CONFIG_EARLY_PRINTK=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index e5eb97a910ed..9ea5b8ecc7e1 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -1072,7 +1072,28 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 		     "ASUS A7V ACPI BIOS Revision 1007"),
 		},
 	},
-
+	{
+	 /*
+	  * Latest BIOS for IBM 600E (1.16) has bad pcinum
+	  * for LPC bridge, which is needed for the PCI
+	  * interrupt links to work. DSDT fix is in bug 5966.
+	  * 2645, 2646 model numbers are shared with 600/600E/600X
+	  */
+	 .callback = disable_acpi_irq,
+	 .ident = "IBM Thinkpad 600 Series 2645",
+	 .matches = {
+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+		     DMI_MATCH(DMI_BOARD_NAME, "2645"),
+		     },
+	 },
+	{
+	 .callback = disable_acpi_irq,
+	 .ident = "IBM Thinkpad 600 Series 2646",
+	 .matches = {
+		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+		     DMI_MATCH(DMI_BOARD_NAME, "2646"),
+		     },
+	 },
 	/*
 	 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
 	 */
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index bf86f7662d8b..a7d22d9f3d7e 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -14,11 +14,8 @@
 
 #ifdef CONFIG_ACPI
 
-static int nvidia_hpet_detected __initdata;
-
 static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
-	nvidia_hpet_detected = 1;
 	return 0;
 }
 #endif
@@ -29,9 +26,7 @@ static int __init check_bridge(int vendor, int device)
 	/* According to Nvidia all timer overrides are bogus unless HPET
 	   is enabled. */
 	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
-		nvidia_hpet_detected = 0;
-		acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
-		if (nvidia_hpet_detected == 0) {
+		if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
 			acpi_skip_timer_override = 1;
 			printk(KERN_INFO "Nvidia board "
 			       "detected. Ignoring ACPI "
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 2383bcf18c5d..93aa911646ad 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -28,6 +28,7 @@
 #include <linux/clockchips.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/module.h>
+#include <linux/dmi.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -61,6 +62,11 @@ static int enable_local_apic __initdata = 0;
 
 /* Local APIC timer verification ok */
 static int local_apic_timer_verify_ok;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
+/* Local APIC timer works in C2 */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
 /*
  * Debug level, exported for io_apic.c
@@ -338,6 +344,23 @@ void __init setup_boot_APIC_clock(void)
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
 	long delta, deltapm;
+	int pm_referenced = 0;
+
+	if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+		local_apic_timer_disabled = 1;
+
+	/*
+	 * The local apic timer can be disabled via the kernel
+	 * commandline or from the test above. Register the lapic
+	 * timer as a dummy clock event source on SMP systems, so the
+	 * broadcast mechanism is used. On UP systems simply ignore it.
+	 */
+	if (local_apic_timer_disabled) {
+		/* No broadcast on UP ! */
+		if (num_possible_cpus() > 1)
+			setup_APIC_timer();
+		return;
+	}
 
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
@@ -357,7 +380,8 @@ void __init setup_boot_APIC_clock(void)
 	/* Let the interrupts run */
 	local_irq_enable();
 
-	while(lapic_cal_loops <= LAPIC_CAL_LOOPS);
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
+		cpu_relax();
 
 	local_irq_disable();
@@ -394,6 +418,7 @@ void __init setup_boot_APIC_clock(void)
 			       "%lu (%ld)\n", (unsigned long) res, delta);
 			delta = (long) res;
 		}
+		pm_referenced = 1;
 	}
 
 	/* Calculate the scaled math multiplication factor */
@@ -423,69 +448,43 @@ void __init setup_boot_APIC_clock(void)
 		    calibration_result / (1000000 / HZ),
 		    calibration_result % (1000000 / HZ));
 
-
-	apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
-
-	/*
-	 * Setup the apic timer manually
-	 */
 	local_apic_timer_verify_ok = 1;
-	levt->event_handler = lapic_cal_handler;
-	lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
-	lapic_cal_loops = -1;
-	/* Let the interrupts run */
-	local_irq_enable();
+	/* We trust the pm timer based calibration */
+	if (!pm_referenced) {
+		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
-	while(lapic_cal_loops <= LAPIC_CAL_LOOPS);
+		/*
+		 * Setup the apic timer manually
+		 */
+		levt->event_handler = lapic_cal_handler;
+		lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
+		lapic_cal_loops = -1;
-	local_irq_disable();
+		/* Let the interrupts run */
+		local_irq_enable();
-	/* Stop the lapic timer */
-	lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
+		while(lapic_cal_loops <= LAPIC_CAL_LOOPS)
+			cpu_relax();
-	local_irq_enable();
+		local_irq_disable();
-	/* Jiffies delta */
-	deltaj = lapic_cal_j2 - lapic_cal_j1;
-	apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
+		/* Stop the lapic timer */
+		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
-	/* Check, if the PM timer is available */
-	deltapm = lapic_cal_pm2 - lapic_cal_pm1;
-	apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
+		local_irq_enable();
-	local_apic_timer_verify_ok = 0;
+		/* Jiffies delta */
+		deltaj = lapic_cal_j2 - lapic_cal_j1;
+		apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
-	if (deltapm) {
-		if (deltapm > (pm_100ms - pm_thresh) &&
-		    deltapm < (pm_100ms + pm_thresh)) {
-			apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-			/* Check, if the jiffies result is consistent */
-			if (deltaj < LAPIC_CAL_LOOPS-2 ||
-			    deltaj > LAPIC_CAL_LOOPS+2) {
-				/*
-				 * Not sure, what we can do about this one.
-				 * When high resultion timers are active
-				 * and the lapic timer does not stop in C3
-				 * we are fine. Otherwise more trouble might
-				 * be waiting. -- tglx
-				 */
-				printk(KERN_WARNING "Global event device %s "
-				       "has wrong frequency "
-				       "(%lu ticks instead of %d)\n",
-				       global_clock_event->name, deltaj,
-				       LAPIC_CAL_LOOPS);
-			}
-			local_apic_timer_verify_ok = 1;
-		}
-	} else {
 		/* Check, if the jiffies result is consistent */
-		if (deltaj >= LAPIC_CAL_LOOPS-2 &&
-		    deltaj <= LAPIC_CAL_LOOPS+2) {
+		if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
 			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
-			local_apic_timer_verify_ok = 1;
-		}
-	}
+		else
+			local_apic_timer_verify_ok = 0;
+	} else
+		local_irq_enable();
 
 	if (!local_apic_timer_verify_ok) {
 		printk(KERN_WARNING
@@ -1203,6 +1202,20 @@ static int __init parse_nolapic(char *arg)
 }
 early_param("nolapic", parse_nolapic);
 
+static int __init parse_disable_lapic_timer(char *arg)
+{
+	local_apic_timer_disabled = 1;
+	return 0;
+}
+early_param("nolapic_timer", parse_disable_lapic_timer);
+
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+	local_apic_timer_c2_ok = 1;
+	return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
 static int __init apic_set_verbosity(char *str)
 {
 	if (strcmp("debug", str) == 0)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 41cfea57232b..2d47db482972 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -22,6 +22,37 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
+#define ENABLE_C1E_MASK		0x18000000
+#define CPUID_PROCESSOR_SIGNATURE	1
+#define CPUID_XFAM		0x0ff00000
+#define CPUID_XFAM_K8		0x00000000
+#define CPUID_XFAM_10H		0x00100000
+#define CPUID_XFAM_11H		0x00200000
+#define CPUID_XMOD		0x000f0000
+#define CPUID_XMOD_REV_F	0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+	u32 lo, hi;
+	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	switch (eax & CPUID_XFAM) {
+	case CPUID_XFAM_K8:
+		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+			break;
+	case CPUID_XFAM_10H:
+	case CPUID_XFAM_11H:
+		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+		if (lo & ENABLE_C1E_MASK)
+			return 1;
+		break;
+	default:
+		/* err on the side of caution */
+		return 1;
+	}
+	return 0;
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	if (cpuid_eax(0x80000000) >= 0x80000006)
 		num_cache_leaves = 3;
+
+	if (amd_apic_timer_broken())
+		set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index f3ab61ee7498..17d73459fc5f 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -3,6 +3,8 @@
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
 
 #include <asm/hpet.h>
 #include <asm/io.h>
@@ -197,7 +199,7 @@ static int hpet_next_event(unsigned long delta,
 	cnt += delta;
 	hpet_writel(cnt, HPET_T0_CMP);
 
-	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
 }
 
 /*
@@ -307,6 +309,7 @@ int __init hpet_enable(void)
 out_nohpet:
 	iounmap(hpet_virt_address);
 	hpet_virt_address = NULL;
+	boot_hpet_disable = 1;
 	return 0;
 }
@@ -521,3 +524,68 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 #endif
+
+
+/*
+ * Suspend/resume part
+ */
+
+#ifdef CONFIG_PM
+
+static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
+
+	cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
+	hpet_writel(cfg, HPET_CFG);
+
+	return 0;
+}
+
+static int hpet_resume(struct sys_device *sys_device)
+{
+	unsigned int id;
+
+	hpet_start_counter();
+
+	id = hpet_readl(HPET_ID);
+
+	if (id & HPET_ID_LEGSUP)
+		hpet_enable_int();
+
+	return 0;
+}
+
+static struct sysdev_class hpet_class = {
+	set_kset_name("hpet"),
+	.suspend	= hpet_suspend,
+	.resume		= hpet_resume,
+};
+
+static struct sys_device hpet_device = {
+	.id		= 0,
+	.cls		= &hpet_class,
+};
+
+
+static __init int hpet_register_sysfs(void)
+{
+	int err;
+
+	if (!is_hpet_capable())
+		return 0;
+
+	err = sysdev_class_register(&hpet_class);
+
+	if (!err) {
+		err = sysdev_register(&hpet_device);
+		if (err)
+			sysdev_class_unregister(&hpet_class);
+	}
+
+	return err;
+}
+
+device_initcall(hpet_register_sysfs);
+
+#endif
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index e3d4b73bfdb0..4afe26e86260 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -28,3 +28,5 @@ EXPORT_SYMBOL(__read_lock_failed);
 #endif
 
 EXPORT_SYMBOL(csum_partial);
+
+EXPORT_SYMBOL(_proxy_pda);
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 5cbb776b3089..10cef5ca8a5b 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -47,9 +47,17 @@ static void init_pit_timer(enum clock_event_mode mode,
 		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
 		break;
 
-	case CLOCK_EVT_MODE_ONESHOT:
+	/*
+	 * Avoid unnecessary state transitions, as it confuses
+	 * Geode / Cyrix based boxen.
+	 */
 	case CLOCK_EVT_MODE_SHUTDOWN:
+		if (evt->mode == CLOCK_EVT_MODE_UNUSED)
+			break;
 	case CLOCK_EVT_MODE_UNUSED:
+		if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN)
+			break;
+	case CLOCK_EVT_MODE_ONESHOT:
 		/* One shot setup */
 		outb_p(0x38, PIT_MODE);
 		udelay(10);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index e4408ff4e674..b3ab8ffebd27 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -736,7 +736,7 @@ failed:
 	return 0;
 }
 
-int __init irqbalance_disable(char *str)
+int __devinit irqbalance_disable(char *str)
 {
 	irqbalance_disabled = 1;
 	return 1;
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index b8f16633a6ec..cbe7ec8dbb9f 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -567,6 +567,53 @@ static int cpu_request_microcode(int cpu)
 	return error;
 }
 
+static int apply_microcode_on_cpu(int cpu)
+{
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	cpumask_t old;
+	unsigned int val[2];
+	int err = 0;
+
+	if (!uci->mc)
+		return -EINVAL;
+
+	old = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	/* Check if the microcode we have in memory matches the CPU */
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64) || uci->sig != cpuid_eax(0x00000001))
+		err = -EINVAL;
+
+	if (!err && ((c->x86_model >= 5) || (c->x86 > 6))) {
+		/* get processor flags from MSR 0x17 */
+		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+		if (uci->pf != (1 << ((val[1] >> 18) & 7)))
+			err = -EINVAL;
+	}
+
+	if (!err) {
+		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+		/* see notes above for revision 1.07. Apparent chip bug */
+		sync_core();
+		/* get the current revision from MSR 0x8B */
+		rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+		if (uci->rev != val[1])
+			err = -EINVAL;
+	}
+
+	if (!err)
+		apply_microcode(cpu);
+	else
+		printk(KERN_ERR "microcode: Could not apply microcode to CPU%d:"
+			" sig=0x%x, pf=0x%x, rev=0x%x\n",
+			cpu, uci->sig, uci->pf, uci->rev);
+
+	set_cpus_allowed(current, old);
+	return err;
+}
+
 static void microcode_init_cpu(int cpu)
 {
 	cpumask_t old;
@@ -577,7 +624,8 @@ static void microcode_init_cpu(int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
-	if (uci->valid && system_state == SYSTEM_RUNNING)
+	if (uci->valid && system_state == SYSTEM_RUNNING &&
+	    !suspend_cpu_hotplug)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
 	set_cpus_allowed(current, old);
@@ -663,13 +711,24 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return 0;
 	pr_debug("Microcode:CPU %d added\n", cpu);
-	memset(uci, 0, sizeof(*uci));
+	/* If suspend_cpu_hotplug is set, the system is resuming and we should
+	 * use the data from before the suspend.
+	 */
+	if (suspend_cpu_hotplug) {
+		err = apply_microcode_on_cpu(cpu);
+		if (err)
+			microcode_fini_cpu(cpu);
+	}
+	if (!uci->valid)
+		memset(uci, 0, sizeof(*uci));
 
 	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
 	if (err)
 		return err;
 
-	microcode_init_cpu(cpu);
+	if (!uci->valid)
+		microcode_init_cpu(cpu);
+
 	return 0;
 }
@@ -680,7 +739,11 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
 	if (!cpu_online(cpu))
 		return 0;
 	pr_debug("Microcode:CPU %d removed\n", cpu);
-	microcode_fini_cpu(cpu);
+	/* If suspend_cpu_hotplug is set, the system is suspending and we should
+	 * keep the microcode in memory for the resume.
+	 */
+	if (!suspend_cpu_hotplug)
+		microcode_fini_cpu(cpu);
 	sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 821df34d2b3a..a98ba88a8c0c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -245,14 +310,6 @@ static int __init check_nmi_watchdog(void)
 	unsigned int *prev_nmi_count;
 	int cpu;
 
-	/* Enable NMI watchdog for newer systems.
-	   Probably safe on most older systems too, but let's be careful.
-	   IBM ThinkPads use INT10 inside SMM and that allows early NMI inside SMM
-	   which hangs the system. Disable watchdog for all thinkpads */
-	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004 &&
-		!dmi_name_in_vendors("ThinkPad"))
-		nmi_watchdog = NMI_LOCAL_APIC;
-
 	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
 		return 0;
@@ -271,7 +328,7 @@ static int __init check_nmi_watchdog(void)
 	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
-	mdelay((10*1000)/nmi_hz); // wait 10 ticks
+	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
@@ -515,10 +572,10 @@ static int setup_k7_watchdog(void)
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -541,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -552,8 +609,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE	(1 << 22)
@@ -571,10 +628,10 @@ static int setup_p6_watchdog(void)
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -598,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -609,8 +666,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -676,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -703,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -715,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -744,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -772,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -795,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 9b0dd2744c82..4ff55e675576 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -45,6 +45,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/percpu.h>
+#include <linux/nmi.h>
 
 #include <linux/delay.h>
 #include <linux/mc146818rtc.h>
@@ -1278,8 +1279,9 @@ void __cpu_die(unsigned int cpu)
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
+	unsigned long flags;
 #ifdef CONFIG_HOTPLUG_CPU
-	int ret=0;
+	int ret = 0;
 
 	/*
 	 * We do warm boot only on cpus that had booted earlier
@@ -1297,23 +1299,25 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	/* In case one didn't come up */
 	if (!cpu_isset(cpu, cpu_callin_map)) {
 		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-		local_irq_enable();
 		return -EIO;
 	}
 
-	local_irq_enable();
-
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Unleash the CPU! */
 	cpu_set(cpu, smp_commenced_mask);
 
 	/*
-	 * Check TSC synchronization with the AP:
+	 * Check TSC synchronization with the AP (keep irqs disabled
+	 * while doing so):
 	 */
+	local_irq_save(flags);
 	check_tsc_sync_source(cpu);
+	local_irq_restore(flags);
 
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_isset(cpu, cpu_online_map)) {
 		cpu_relax();
+		touch_nmi_watchdog();
+	}
 
 #ifdef CONFIG_X86_GENERICARCH
 	if (num_online_cpus() > 8 && genapic == &apic_default)
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 602660df455c..6cb8f5336732 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -18,6 +18,8 @@
 
 #include "mach_timer.h"
 
+static int tsc_enabled;
+
 /*
  * On some systems the TSC frequency does not
  * change with the cpu frequency. So we need
@@ -105,7 +107,7 @@ unsigned long long sched_clock(void)
 	/*
 	 * Fall back to jiffies if there's no TSC available:
 	 */
-	if (unlikely(tsc_disable))
+	if (unlikely(!tsc_enabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
@@ -283,6 +285,7 @@ void mark_tsc_unstable(void)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
+		tsc_enabled = 0;
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
 			clocksource_change_rating(&clocksource_tsc, 0);
@@ -383,7 +386,9 @@ void __init tsc_init(void)
 		if (check_tsc_unstable()) {
 			clocksource_tsc.rating = 0;
 			clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-		}
+		} else
+			tsc_enabled = 1;
+
 		clocksource_register(&clocksource_tsc);
 
 		return;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index fbf45fa08320..edc339fa5038 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -23,7 +23,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/license.h>
 #include <linux/cpu.h>
 #include <linux/bootmem.h>
 #include <linux/mm.h>
@@ -48,7 +47,6 @@ typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
 	(((VROMLONGFUNC *)(rom->func)) (arg))
 
 static struct vrom_header *vmi_rom;
-static int license_gplok;
 static int disable_pge;
 static int disable_pse;
 static int disable_sep;
@@ -71,6 +69,7 @@ struct {
 	void (*flush_tlb)(int);
 	void (*set_initial_ap_state)(int, int);
 	void (*halt)(void);
+	void (*set_lazy_mode)(int mode);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -576,6 +575,26 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
+static void vmi_set_lazy_mode(int mode)
+{
+	static DEFINE_PER_CPU(int, lazy_mode);
+
+	if (!vmi_ops.set_lazy_mode)
+		return;
+
+	/* Modes should never nest or overlap */
+	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
+					     mode == PARAVIRT_LAZY_FLUSH));
+
+	if (mode == PARAVIRT_LAZY_FLUSH) {
+		vmi_ops.set_lazy_mode(0);
+		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
+	} else {
+		vmi_ops.set_lazy_mode(mode);
+		__get_cpu_var(lazy_mode) = mode;
+	}
+}
+
 static inline int __init check_vmi_rom(struct vrom_header *rom)
 {
 	struct pci_header *pci;
@@ -629,13 +648,14 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
 		rom->api_version_maj, rom->api_version_min,
 		pci->rom_version_maj, pci->rom_version_min);
 
-	license_gplok = license_is_gpl_compatible(license);
-	if (!license_gplok) {
-		printk(KERN_WARNING "VMI: ROM license '%s' taints kernel... "
-		       "inlining disabled\n",
-		       license);
-		add_taint(TAINT_PROPRIETARY_MODULE);
-	}
+	/* Don't allow BSD/MIT here for now because we don't want to end up
+	   with any binary only shim layers */
+	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
+		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
+		       license);
+		return 0;
+	}
+
 	return 1;
 }
@@ -805,7 +825,7 @@ static inline int __init activate_vmi(void)
 	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(set_iopl_mask, SetIOPLMask);
 	para_fill(io_delay, IODelay);
-	para_fill(set_lazy_mode, SetLazyMode);
+	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
 	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index d22cfc9d656c..086b3726862a 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -10,6 +10,7 @@
 #include <linux/blkdev.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
+#include <linux/interrupt.h>
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
@@ -719,6 +720,14 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 #ifndef CONFIG_X86_WP_WORKS_OK
 	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
 			((unsigned long )to) < TASK_SIZE) {
+		/*
+		 * When we are in an atomic section (see
+		 * mm/filemap.c:file_read_actor), return the full
+		 * length to take the slow path.
+		 */
+		if (in_atomic())
+			return n;
+
 		/*
 		 * CPU does not honor the WP bit when writing
 		 * from supervisory mode, and due to preemption or SMP,
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index bb2de1089add..ac70d09df7ee 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
@@ -82,6 +83,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 1bb069372143..3f78d4d8ecf3 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -193,6 +193,14 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 	},
 	{
 		.callback = set_bf_sort,
+		.ident = "Dell PowerEdge R900",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
+		},
+	},
+	{
+		.callback = set_bf_sort,
 		.ident = "HP ProLiant BL20p G3",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
@@ -426,11 +434,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	if ((err = pcibios_enable_resources(dev, mask)) < 0)
 		return err;
 
-	return pcibios_enable_irq(dev);
+	if (!dev->msi_enabled)
+		return pcibios_enable_irq(dev);
+	return 0;
 }
 
 void pcibios_disable_device (struct pci_dev *dev)
 {
-	if (pcibios_disable_irq)
+	if (!dev->msi_enabled && pcibios_disable_irq)
 		pcibios_disable_irq(dev);
 }