92 files changed, 938 insertions, 491 deletions
@@ -3301,14 +3301,6 @@ S: 12725 SW Millikan Way, Suite 400 S: Beaverton, Oregon 97005 S: USA -N: Li Yang -E: leoli@freescale.com -D: Freescale Highspeed USB device driver -D: Freescale QE SoC support and Ethernet driver -S: B-1206 Jingmao Guojigongyu -S: 16 Baliqiao Nanjie, Beijing 101100 -S: People's Repulic of China - N: Marcelo Tosatti E: marcelo@kvack.org D: v2.4 kernel maintainer @@ -3726,6 +3718,14 @@ S: 542 West 112th Street, 5N S: New York, New York 10025 S: USA +N: Li Yang +E: leoli@freescale.com +D: Freescale Highspeed USB device driver +D: Freescale QE SoC support and Ethernet driver +S: B-1206 Jingmao Guojigongyu +S: 16 Baliqiao Nanjie, Beijing 101100 +S: People's Repulic of China + N: Victor Yodaiken E: yodaiken@fsmlabs.com D: RTLinux (RealTime Linux) diff --git a/MAINTAINERS b/MAINTAINERS index bef79776b388..4ce895a4b5ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4022,11 +4022,11 @@ S: Supported XFS FILESYSTEM P: Silicon Graphics Inc -P: Tim Shimmin, David Chatterton +P: Tim Shimmin M: xfs-masters@oss.sgi.com L: xfs@oss.sgi.com W: http://oss.sgi.com/projects/xfs -T: git git://oss.sgi.com:8090/xfs/xfs-2.6 +T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git S: Supported XILINX UARTLITE SERIAL DRIVER diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c index c4ebb5126ef7..6d5937891b46 100644 --- a/arch/i386/kernel/cpu/mtrr/generic.c +++ b/arch/i386/kernel/cpu/mtrr/generic.c @@ -42,7 +42,7 @@ static int mtrr_show; module_param_named(show, mtrr_show, bool, 0); /* Get the MSR pair relating to a var range */ -static void __init +static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); @@ -68,7 +68,7 @@ void mtrr_save_fixed_ranges(void *info) get_fixed_ranges(mtrr_state.fixed_ranges); } -static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*types) +static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) { unsigned i; diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index 7202b98aac4f..55b005152a11 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -639,7 +639,7 @@ static struct sysdev_driver mtrr_sysdev_driver = { * initialized (i.e. before smp_init()). * */ -void mtrr_bp_init(void) +__init void mtrr_bp_init(void) { init_ifs(); diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c index 30b754f7cbec..048f09b62553 100644 --- a/arch/i386/kernel/pci-dma.c +++ b/arch/i386/kernel/pci-dma.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/pci.h> #include <linux/module.h> +#include <linux/pci.h> #include <asm/io.h> struct dma_coherent_mem { @@ -148,3 +149,29 @@ void *dma_mark_declared_memory_occupied(struct device *dev, return mem->virt_base + (pos << PAGE_SHIFT); } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); + +#ifdef CONFIG_PCI +/* Many VIA bridges seem to corrupt data for DAC. Disable it here */ + +int forbid_dac; +EXPORT_SYMBOL(forbid_dac); + +static __devinit void via_no_dac(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { + printk(KERN_INFO "PCI: VIA PCI bridge detected. 
Disabling DAC.\n"); + forbid_dac = 1; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); + +static int check_iommu(char *s) +{ + if (!strcmp(s, "usedac")) { + forbid_dac = -1; + return 1; + } + return 0; +} +__setup("iommu=", check_iommu); +#endif diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index b22ce8d6b1ba..7135946d3663 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -799,6 +799,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; +#ifndef CONFIG_KPROBES #ifdef CONFIG_HOTPLUG_CPU /* It must still be possible to apply SMP alternatives. */ if (num_possible_cpus() <= 1) @@ -808,7 +809,7 @@ void mark_rodata_ro(void) size >> PAGE_SHIFT, PAGE_KERNEL_RX); printk("Write protecting the kernel text: %luk\n", size >> 10); } - +#endif start += size; size = (unsigned long)__end_rodata - start; change_page_attr(virt_to_page(start), diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c index 47bd477c8ecc..2eb14a73be9c 100644 --- a/arch/i386/mm/pageattr.c +++ b/arch/i386/mm/pageattr.c @@ -68,14 +68,23 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot, return base; } -static void flush_kernel_map(void *arg) +static void cache_flush_page(struct page *p) { - unsigned long adr = (unsigned long)arg; + unsigned long adr = (unsigned long)page_address(p); + int i; + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + asm volatile("clflush (%0)" :: "r" (adr + i)); +} + +static void flush_kernel_map(void *arg) +{ + struct list_head *lh = (struct list_head *)arg; + struct page *p; - if (adr && cpu_has_clflush) { - int i; - for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) - asm volatile("clflush (%0)" :: "r" (adr + i)); + /* High level code is not ready for clflush yet */ + if (0 && cpu_has_clflush) { + list_for_each_entry (p, lh, lru) + cache_flush_page(p); } else if (boot_cpu_data.x86_model >= 4) wbinvd(); @@ -181,9 +190,9 @@ __change_page_attr(struct page *page, pgprot_t prot) return 0; } -static inline void flush_map(void *adr) +static inline void flush_map(struct list_head *l) { - on_each_cpu(flush_kernel_map, adr, 1, 1); + on_each_cpu(flush_kernel_map, l, 1, 1); } /* @@ -225,11 +234,8 @@ void global_flush_tlb(void) spin_lock_irq(&cpa_lock); list_replace_init(&df_list, &l); spin_unlock_irq(&cpa_lock); - if (!cpu_has_clflush) - flush_map(NULL); + flush_map(&l); list_for_each_entry_safe(pg, next, &l, lru) { - if (cpu_has_clflush) - flush_map(page_address(pg)); __free_page(pg); } } diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 85cdd23b0447..a86e2e9a639f 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -418,9 +418,6 @@ config STRAM_PROC help Say Y here to report ST-RAM usage statistics in /proc/stram. 
-config ATARI_KBD_CORE - bool - config HEARTBEAT bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40 default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300 diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 21eb5993a19f..2e011470c347 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -13,9 +13,9 @@ #include <asm/system.h> #include <asm/hardirq.h> #include <asm/hazards.h> +#include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/smp.h> -#include <asm/mips-boards/maltaint.h> #include <asm/mipsregs.h> #include <asm/cacheflush.h> #include <asm/time.h> @@ -614,7 +614,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new, #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG unsigned int vpe = current_cpu_data.vpe_id; - vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1; + vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; #endif irq_hwmask[irq] = hwmask; @@ -822,7 +822,7 @@ void ipi_decode(struct smtc_ipi *pipi) switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); - kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++; + kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_perfcount_irq]++; /* Invoke Clock "Interrupt" */ ipi_timer_latch[dest_copy] = 0; #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 7def1ff3da94..d48d1d5bea0a 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -200,10 +200,15 @@ EXPORT_SYMBOL(null_perf_irq); EXPORT_SYMBOL(perf_irq); /* + * Timer interrupt + */ +int cp0_compare_irq; + +/* * Performance counter IRQ or -1 if shared with timer */ -int mipsxx_perfcount_irq; -EXPORT_SYMBOL(mipsxx_perfcount_irq); +int cp0_perfcount_irq; +EXPORT_SYMBOL_GPL(cp0_perfcount_irq); /* * Possibly handle a performance counter interrupt. @@ -213,12 +218,12 @@ static inline int handle_perf_irq (int r2) { /* * The performance counter overflow interrupt may be shared with the - * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a + * timer interrupt (cp0_perfcount_irq < 0). If it is and a * performance counter has overflowed (perf_irq() == IRQ_HANDLED) * and we can't reliably determine if a counter interrupt has also * happened (!r2) then don't check for a timer interrupt. */ - return (mipsxx_perfcount_irq < 0) && + return (cp0_perfcount_irq < 0) && perf_irq() == IRQ_HANDLED && !r2; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a7a17eb9bfcd..b1233644fcca 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1350,9 +1350,6 @@ void __init per_cpu_trap_init(void) if (!secondaryTC) { #endif /* CONFIG_MIPS_MT_SMTC */ - /* - * Interrupt handling. 
- */ if (cpu_has_veic || cpu_has_vint) { write_c0_ebase (ebase); /* Setting vector spacing enables EI/VI mode */ @@ -1366,6 +1363,23 @@ void __init per_cpu_trap_init(void) } else set_c0_cause(CAUSEF_IV); } + + /* + * Before R2 both interrupt numbers were fixed to 7, so on R2 only: + * + * o read IntCtl.IPTI to determine the timer interrupt + * o read IntCtl.IPPCI to determine the performance counter interrupt + */ + if (cpu_has_mips_r2) { + cp0_compare_irq = (read_c0_intctl () >> 29) & 7; + cp0_perfcount_irq = -1; + } else { + cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; + cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7; + if (cp0_perfcount_irq != cp0_compare_irq) + cp0_perfcount_irq = -1; + } + #ifdef CONFIG_MIPS_MT_SMTC } #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c index 9f49da95aacf..6c8f0255e85d 100644 --- a/arch/mips/mips-boards/atlas/atlas_int.c +++ b/arch/mips/mips-boards/atlas/atlas_int.c @@ -189,7 +189,7 @@ asmlinkage void plat_irq_dispatch(void) if (irq == MIPSCPU_INT_ATLAS) atlas_hw0_irqdispatch(); else if (irq >= 0) - do_IRQ(MIPSCPU_INT_BASE + irq); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); else spurious_interrupt(); } @@ -261,11 +261,11 @@ void __init arch_init_irq(void) } else if (cpu_has_vint) { set_vi_handler (MIPSCPU_INT_ATLAS, atlas_hw0_irqdispatch); #ifdef CONFIG_MIPS_MT_SMTC - setup_irq_smtc (MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS, + setup_irq_smtc (MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS, &atlasirq, (0x100 << MIPSCPU_INT_ATLAS)); #else /* Not SMTC */ - setup_irq(MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS, &atlasirq); + setup_irq(MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS, &atlasirq); #endif /* CONFIG_MIPS_MT_SMTC */ } else - setup_irq(MIPSCPU_INT_BASE + MIPSCPU_INT_ATLAS, &atlasirq); + setup_irq(MIPS_CPU_IRQ_BASE + MIPSCPU_INT_ATLAS, &atlasirq); } diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c index 8f1000f51b3d..c45d556aa96b 100644 --- a/arch/mips/mips-boards/generic/time.c +++ b/arch/mips/mips-boards/generic/time.c @@ -54,7 +54,7 @@ unsigned long cpu_khz; static int mips_cpu_timer_irq; -extern int mipsxx_perfcount_irq; +extern int cp0_perfcount_irq; extern void smtc_timer_broadcast(int); static void mips_timer_dispatch(void) @@ -64,7 +64,7 @@ static void mips_timer_dispatch(void) static void mips_perf_dispatch(void) { - do_IRQ(mipsxx_perfcount_irq); + do_IRQ(cp0_perfcount_irq); } /* @@ -82,12 +82,12 @@ static inline int handle_perf_irq (int r2) { /* * The performance counter overflow interrupt may be shared with the - * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a + * timer interrupt (cp0_perfcount_irq < 0). If it is and a * performance counter has overflowed (perf_irq() == IRQ_HANDLED) * and we can't reliably determine if a counter interrupt has also * happened (!r2) then don't check for a timer interrupt. 
*/ - return (mipsxx_perfcount_irq < 0) && + return (cp0_perfcount_irq < 0) && perf_irq() == IRQ_HANDLED && !r2; } @@ -259,42 +259,31 @@ static struct irqaction perf_irqaction = { void __init plat_perf_setup(struct irqaction *irq) { - int hwint = 0; - mipsxx_perfcount_irq = -1; + cp0_perfcount_irq = -1; #ifdef MSC01E_INT_BASE if (cpu_has_veic) { set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch); - mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; + cp0_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; } else #endif - if (cpu_has_mips_r2) { - /* - * Read IntCtl.IPPCI to determine the performance - * counter interrupt - */ - hwint = (read_c0_intctl () >> 26) & 7; - if (hwint != MIPSCPU_INT_CPUCTR) { - if (cpu_has_vint) - set_vi_handler (hwint, mips_perf_dispatch); - mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint; - } - } - if (mipsxx_perfcount_irq >= 0) { + if (cp0_perfcount_irq >= 0) { + if (cpu_has_vint) + set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); #ifdef CONFIG_MIPS_MT_SMTC - setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint); + setup_irq_smtc(cp0_perfcount_irq, irq, + 0x100 << cp0_perfcount_irq); #else - setup_irq(mipsxx_perfcount_irq, irq); + setup_irq(cp0_perfcount_irq, irq); #endif /* CONFIG_MIPS_MT_SMTC */ #ifdef CONFIG_SMP - set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq); + set_irq_handler(cp0_perfcount_irq, handle_percpu_irq); #endif } } void __init plat_timer_setup(struct irqaction *irq) { - int hwint = 0; #ifdef MSC01E_INT_BASE if (cpu_has_veic) { set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch); @@ -303,22 +292,15 @@ void __init plat_timer_setup(struct irqaction *irq) else #endif { - if (cpu_has_mips_r2) - /* - * Read IntCtl.IPTI to determine the timer interrupt - */ - hwint = (read_c0_intctl () >> 29) & 7; - else - hwint = MIPSCPU_INT_CPUCTR; if (cpu_has_vint) - set_vi_handler (hwint, mips_timer_dispatch); - mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint; + set_vi_handler(cp0_compare_irq, mips_timer_dispatch); + mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; } /* we are using the cpu counter for timer interrupts */ irq->handler = mips_timer_interrupt; /* we use our own handler */ #ifdef CONFIG_MIPS_MT_SMTC - setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint); + setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq); #else setup_irq(mips_cpu_timer_irq, irq); #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c index 1668cc21d5b5..c78d48349600 100644 --- a/arch/mips/mips-boards/malta/malta_int.c +++ b/arch/mips/mips-boards/malta/malta_int.c @@ -257,7 +257,7 @@ asmlinkage void plat_irq_dispatch(void) if (irq == MIPSCPU_INT_I8259A) malta_hw0_irqdispatch(); else if (irq > 0) - do_IRQ(MIPSCPU_INT_BASE + irq); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); else spurious_interrupt(); } @@ -326,17 +326,17 @@ void __init arch_init_irq(void) set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch); #ifdef CONFIG_MIPS_MT_SMTC - setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq, + setup_irq_smtc (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq, (0x100 << MIPSCPU_INT_I8259A)); - setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, + setup_irq_smtc (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); #else /* Not SMTC */ - setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); - setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, 
&corehi_irqaction); + setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); + setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); #endif /* CONFIG_MIPS_MT_SMTC */ } else { - setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); - setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); + setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); + setup_irq (MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); } } diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c index c4b9de3a7f27..9ca0f82f1360 100644 --- a/arch/mips/mips-boards/sead/sead_int.c +++ b/arch/mips/mips-boards/sead/sead_int.c @@ -106,7 +106,7 @@ asmlinkage void plat_irq_dispatch(void) irq = irq_ffs(pending); if (irq >= 0) - do_IRQ(MIPSCPU_INT_BASE + irq); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); else spurious_interrupt(); } diff --git a/arch/mips/mips-boards/sead/sead_setup.c b/arch/mips/mips-boards/sead/sead_setup.c index 811aba100605..bb801409d39b 100644 --- a/arch/mips/mips-boards/sead/sead_setup.c +++ b/arch/mips/mips-boards/sead/sead_setup.c @@ -68,7 +68,7 @@ static void __init serial_init(void) #else s.iobase = SEAD_UART0_REGS_BASE+3; #endif - s.irq = MIPSCPU_INT_BASE + MIPSCPU_INT_UART0; + s.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_UART0; s.uartclk = SEAD_BASE_BAUD * 16; s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ; s.iotype = UPIO_PORT; diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c index 15ac0655c1ff..766e0159ee5b 100644 --- a/arch/mips/mips-boards/sim/sim_int.c +++ b/arch/mips/mips-boards/sim/sim_int.c @@ -77,7 +77,7 @@ asmlinkage void plat_irq_dispatch(void) irq = irq_ffs(pending); if (irq > 0) - do_IRQ(MIPSCPU_INT_BASE + irq); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); else spurious_interrupt(); } diff --git a/arch/mips/mips-boards/sim/sim_time.c b/arch/mips/mips-boards/sim/sim_time.c index d3a21c741514..7224ffe31d36 100644 --- a/arch/mips/mips-boards/sim/sim_time.c +++ b/arch/mips/mips-boards/sim/sim_time.c @@ -71,8 +71,8 @@ irqreturn_t sim_timer_interrupt(int irq, void *dev_id) int vpflags = dvpe(); write_c0_compare (read_c0_count() - 1); - clear_c0_cause(0x100 << MIPSCPU_INT_CPUCTR); - set_c0_status(0x100 << MIPSCPU_INT_CPUCTR); + clear_c0_cause(0x100 << cp0_compare_irq); + set_c0_status(0x100 << cp0_compare_irq); irq_enable_hazard(); evpe(vpflags); @@ -183,8 +183,8 @@ void __init plat_timer_setup(struct irqaction *irq) } else { if (cpu_has_vint) - set_vi_handler(MIPSCPU_INT_CPUCTR, mips_timer_dispatch); - mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR; + set_vi_handler(cp0_compare_irq, mips_timer_dispatch); + mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; } /* we are using the cpu counter for timer interrupts */ diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index e70f57e27643..322167737de7 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -16,6 +16,8 @@ #include <asm/uaccess.h> #include <asm/assembly.h> +#include <asm/asm-offsets.h> +#include <asm/ptrace.h> #include <asm/unwind.h> @@ -26,6 +28,8 @@ #define dbg(x...) 
#endif +#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000) + extern struct unwind_table_entry __start___unwind[]; extern struct unwind_table_entry __stop___unwind[]; @@ -197,6 +201,29 @@ static int unwind_init(void) return 0; } +#ifdef CONFIG_64BIT +#define get_func_addr(fptr) fptr[2] +#else +#define get_func_addr(fptr) fptr[0] +#endif + +static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) +{ + void handle_interruption(int, struct pt_regs *); + static unsigned long *hi = (unsigned long)&handle_interruption; + + if (pc == get_func_addr(hi)) { + struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); + dbg("Unwinding through handle_interruption()\n"); + info->prev_sp = regs->gr[30]; + info->prev_ip = regs->iaoq[0]; + + return 1; + } + + return 0; +} + static void unwind_frame_regs(struct unwind_frame_info *info) { const struct unwind_table_entry *e; @@ -310,13 +337,15 @@ static void unwind_frame_regs(struct unwind_frame_info *info) } } - info->prev_sp = info->sp - frame_size; - if (e->Millicode) - info->rp = info->r31; - else if (rpoffset) - info->rp = *(unsigned long *)(info->prev_sp - rpoffset); - info->prev_ip = info->rp; - info->rp = 0; + if (!unwind_special(info, e->region_start, frame_size)) { + info->prev_sp = info->sp - frame_size; + if (e->Millicode) + info->rp = info->r31; + else if (rpoffset) + info->rp = *(unsigned long *)(info->prev_sp - rpoffset); + info->prev_ip = info->rp; + info->rp = 0; + } dbg("analyzing func @ %lx, setting prev_sp=%lx " "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp, diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c index 180ee2933ab9..2f24ea0d723a 100644 --- a/arch/powerpc/lib/rheap.c +++ b/arch/powerpc/lib/rheap.c @@ -437,27 +437,26 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch struct list_head *l; rh_block_t *blk; rh_block_t *newblk; - unsigned long start; + unsigned long start, sp_size; /* Validate size, and alignment must be power of two */ if (size <= 0 || (alignment & (alignment - 1)) != 0) return (unsigned long) -EINVAL; - /* given alignment larger that default rheap alignment */ - if (alignment > info->alignment) - size += alignment - 1; - /* Align to configured alignment */ size = (size + (info->alignment - 1)) & ~(info->alignment - 1); - if (assure_empty(info, 1) < 0) + if (assure_empty(info, 2) < 0) return (unsigned long) -ENOMEM; blk = NULL; list_for_each(l, &info->free_list) { blk = list_entry(l, rh_block_t, list); - if (size <= blk->size) - break; + if (size <= blk->size) { + start = (blk->start + alignment - 1) & ~(alignment - 1); + if (start + size <= blk->start + blk->size) + break; + } blk = NULL; } @@ -470,25 +469,36 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch list_del(&blk->list); newblk = blk; } else { + /* Fragment caused, split if needed */ + /* Create block for fragment in the beginning */ + sp_size = start - blk->start; + if (sp_size) { + rh_block_t *spblk; + + spblk = get_slot(info); + spblk->start = blk->start; + spblk->size = sp_size; + /* add before the blk */ + list_add(&spblk->list, blk->list.prev); + } newblk = get_slot(info); - newblk->start = blk->start; + newblk->start = start; newblk->size = size; - /* blk still in free list, with updated start, size */ - blk->start += size; - blk->size -= size; + /* blk still in free list, with updated start and size + * for fragment in the end */ + blk->start = start + size; + blk->size -= sp_size + size; + /* No fragment 
in the end, remove blk */ + if (blk->size == 0) { + list_del(&blk->list); + release_slot(info, blk); + } } newblk->owner = owner; - start = newblk->start; attach_taken_block(info, newblk); - /* for larger alignment return fixed up pointer */ - /* this is no problem with the deallocator since */ - /* we scan for pointers that lie in the blocks */ - if (alignment > info->alignment) - start = (start + alignment - 1) & ~(alignment - 1); - return start; } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index bfe901353142..115b25f50bf8 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -279,14 +279,13 @@ good_area: #endif /* CONFIG_8xx */ if (is_exec) { -#ifdef CONFIG_PPC64 +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* protection fault */ if (error_code & DSISR_PROTFAULT) goto bad_area; if (!(vma->vm_flags & VM_EXEC)) goto bad_area; -#endif -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) +#else pte_t *ptep; pmd_t *pmdp; diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 956571526a57..7ccb9236e8b4 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -454,6 +454,9 @@ static int initializing = 1; static int pmac_late_init(void) { + if (!machine_is(powermac)) + return -ENODEV; + initializing = 0; /* this is udbg (which is __init) and we can later use it during * cpu hotplug (in smp_core99_kick_cpu) */ diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 81a2b92ab0c2..6ffbab77ae4d 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -535,8 +535,7 @@ void appldata_unregister_ops(struct appldata_ops *ops) /******************************* init / exit *********************************/ -static void -appldata_online_cpu(int cpu) +static void __cpuinit appldata_online_cpu(int cpu) { init_virt_timer(&per_cpu(appldata_timer, cpu)); per_cpu(appldata_timer, cpu).function = appldata_timer_function; @@ -580,7 +579,7 @@ appldata_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block appldata_nb = { +static struct notifier_block __cpuinitdata appldata_nb = { .notifier_call = appldata_cpu_notify, }; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c8a2212014e0..6234c6978a1f 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -769,10 +769,13 @@ mcck_return: RESTORE_ALL __LC_RETURN_MCCK_PSW,0 -#ifdef CONFIG_SMP /* * Restart interruption handler, kick starter for additional CPUs */ +#ifdef CONFIG_SMP +#ifndef CONFIG_HOTPLUG_CPU + .section .init.text,"ax" +#endif .globl restart_int_handler restart_int_handler: l %r15,__LC_SAVE_AREA+60 # load ksp @@ -785,6 +788,9 @@ restart_int_handler: br %r14 # branch to start_secondary restart_addr: .long start_secondary +#ifndef CONFIG_HOTPLUG_CPU + .previous +#endif #else /* * If we do not run with SMP enabled, let the new CPU crash ... 
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 93745fd8f555..685f11faa4bc 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -745,10 +745,13 @@ mcck_return: #endif lpswe __LC_RETURN_MCCK_PSW # back to caller -#ifdef CONFIG_SMP /* * Restart interruption handler, kick starter for additional CPUs */ +#ifdef CONFIG_SMP +#ifndef CONFIG_HOTPLUG_CPU + .section .init.text,"ax" +#endif .globl restart_int_handler restart_int_handler: lg %r15,__LC_SAVE_AREA+120 # load ksp @@ -759,6 +762,9 @@ restart_int_handler: lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on jg start_secondary +#ifndef CONFIG_HOTPLUG_CPU + .previous +#endif #else /* * If we do not run with SMP enabled, let the new CPU crash ... diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 51d6309e7f3b..7e1bfb984064 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -300,6 +300,7 @@ static void __init setup_zfcpdump(unsigned int console_devno) else sprintf(str, "cio_ignore=all,!0.0.%04x", ipl_info.data.fcp.dev_id.devno); + strcat(COMMAND_LINE, " "); strcat(COMMAND_LINE, str); console_loglevel = 2; } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index cbfe73034c30..ee9186f8fb08 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -253,19 +253,22 @@ void die(const char * str, struct pt_regs * regs, long err) { static int die_counter; + oops_enter(); debug_stop_all(); console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - show_regs(regs); + print_modules(); + show_regs(regs); bust_spinlocks(0); - spin_unlock_irq(&die_lock); + spin_unlock_irq(&die_lock); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception: panic_on_oops"); - do_exit(SIGSEGV); + oops_exit(); + do_exit(SIGSEGV); } static void inline diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c index b32c35a7c0a3..e323e299878b 100644 --- a/arch/sh/kernel/signal.c +++ b/arch/sh/kernel/signal.c @@ -268,7 +268,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, badframe: force_sig(SIGSEGV, current); return 0; -} +} /* * Set up a signal frame. @@ -481,7 +481,7 @@ give_sigsegv: static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) + sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0) { int ret; @@ -489,6 +489,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, if (regs->tra >= 0) { /* If so, check system call restarting.. */ switch (regs->regs[0]) { + case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->regs[0] = -EINTR; break; @@ -500,6 +501,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, } /* fallthrough */ case -ERESTARTNOINTR: + regs->regs[0] = save_r0; regs->pc -= instruction_size( ctrl_inw(regs->pc - 4)); break; @@ -583,7 +585,8 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. 
*/ - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { + if (handle_signal(signr, &ka, &info, oldset, + regs, save_r0) == 0) { /* a signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 5b75cb6f8f9b..8f18930d5bf8 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -83,6 +83,8 @@ void die(const char * str, struct pt_regs * regs, long err) { static int die_counter; + oops_enter(); + console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); @@ -112,6 +114,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); + oops_exit(); do_exit(SIGSEGV); } diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c index c8525ade0564..0bb4a8f94276 100644 --- a/arch/sh64/kernel/signal.c +++ b/arch/sh64/kernel/signal.c @@ -640,6 +640,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, if (regs->syscall_nr >= 0) { /* If so, check system call restarting.. */ switch (regs->regs[REG_RET]) { + case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->regs[REG_RET] = -EINTR; break; diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 21868f9bed7c..47565c3345d2 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -620,7 +620,7 @@ ia32_sys_call_table: .quad quiet_ni_syscall /* tux */ .quad quiet_ni_syscall /* security */ .quad sys_gettid - .quad sys_readahead /* 225 */ + .quad sys32_readahead /* 225 */ .quad sys_setxattr .quad sys_lsetxattr .quad sys_fsetxattr @@ -645,7 +645,7 @@ ia32_sys_call_table: .quad compat_sys_io_getevents .quad compat_sys_io_submit .quad sys_io_cancel - .quad sys_fadvise64 /* 250 */ + .quad sys32_fadvise64 /* 250 */ .quad quiet_ni_syscall /* free_huge_pages */ .quad sys_exit_group .quad sys32_lookup_dcookie @@ -709,7 +709,7 @@ ia32_sys_call_table: .quad compat_sys_set_robust_list .quad compat_sys_get_robust_list .quad sys_splice - .quad sys_sync_file_range + .quad sys32_sync_file_range .quad sys_tee /* 315 */ .quad compat_sys_vmsplice .quad compat_sys_move_pages diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index 200fdde18d96..99a78a3cce7c 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -860,3 +860,22 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high, return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); } +asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, size_t count) +{ + return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); +} + +asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi, + unsigned n_low, unsigned n_hi, int flags) +{ + return sys_sync_file_range(fd, + ((u64)off_hi << 32) | off_low, + ((u64)n_hi << 32) | n_low, flags); +} + +asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi, size_t len, + int advice) +{ + return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, + len, advice); +} diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c index 651ccfb06697..9f80aad3fe2d 100644 --- a/arch/x86_64/kernel/pci-dma.c +++ b/arch/x86_64/kernel/pci-dma.c @@ -322,5 +322,17 @@ static int __init pci_iommu_init(void) return 0; } +#ifdef CONFIG_PCI +/* Many VIA bridges seem to corrupt data for DAC. 
Disable it here */ + +static __devinit void via_no_dac(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { + printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n"); + forbid_dac = 1; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); +#endif /* Must execute after PCI subsystem */ fs_initcall(pci_iommu_init); diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index efb6e845114e..9a0e98accf04 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -605,6 +605,11 @@ void mark_rodata_ro(void) if (num_possible_cpus() > 1) start = (unsigned long)_etext; #endif + +#ifdef CONFIG_KPROBES + start = (unsigned long)__start_rodata; +#endif + end = (unsigned long)__end_rodata; start = (start + PAGE_SIZE - 1) & PAGE_MASK; end &= PAGE_MASK; diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c index d653d0bf3df6..9148f4a4cec6 100644 --- a/arch/x86_64/mm/pageattr.c +++ b/arch/x86_64/mm/pageattr.c @@ -74,10 +74,11 @@ static void flush_kernel_map(void *arg) struct page *pg; /* When clflush is available always use it because it is - much cheaper than WBINVD */ - if (!cpu_has_clflush) + much cheaper than WBINVD. Disable clflush for now because + the high level code is not ready yet */ + if (1 || !cpu_has_clflush) asm volatile("wbinvd" ::: "memory"); - list_for_each_entry(pg, l, lru) { + else list_for_each_entry(pg, l, lru) { void *adr = page_address(pg); if (cpu_has_clflush) cache_flush_page(adr); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 545f330e59a5..ca5229d24d8e 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -527,7 +527,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev, /* fixup zero port_map */ if (!port_map) { - port_map = (1 << ahci_nr_ports(hpriv->cap)) - 1; + port_map = (1 << ahci_nr_ports(cap)) - 1; dev_printk(KERN_WARNING, &pdev->dev, "PORTS_IMPL is zero, forcing 0x%x\n", port_map); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 047eabd75363..adfae9d1ceb1 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3659,7 +3659,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, /** * ata_dev_reread_id - Re-read IDENTIFY data - * @adev: target ATA device + * @dev: target ATA device * @readid_flags: read ID flags * * Re-read IDENTIFY page and make sure @dev is still attached to @@ -3802,6 +3802,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, /* Drives which do spurious command completion */ { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, + { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, + { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, /* Devices with NCQ limits */ diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index b439351f1fd3..a16f629b7b38 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -693,6 +693,8 @@ static const struct pci_device_id amd[] = { { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 }, { }, diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index b3456d7a592c..dab4e7cf8cda 100644 
--- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -2,6 +2,7 @@ * pata_it821x.c - IT821x PATA for new ATA layer * (C) 2005 Red Hat Inc * Alan Cox <alan@redhat.com> + * (C) 2007 Bartlomiej Zolnierkiewicz * * based upon * @@ -79,7 +80,7 @@ #define DRV_NAME "pata_it821x" -#define DRV_VERSION "0.3.6" +#define DRV_VERSION "0.3.7" struct it821x_dev { @@ -460,14 +461,8 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc) static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused) { - int dma_enabled = 0; int i; - /* Bits 5 and 6 indicate if DMA is active on master/slave */ - /* It is possible that BMDMA isn't allocated */ - if (ap->ioaddr.bmdma_addr) - dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *dev = &ap->device[i]; if (ata_dev_enabled(dev)) { @@ -476,7 +471,7 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused dev->dma_mode = XFER_MW_DMA_0; /* We do need the right mode information for DMA or PIO and this comes from the current configuration flags */ - if (dma_enabled & (1 << (5 + i))) { + if (ata_id_has_dma(dev->id)) { ata_dev_printk(dev, KERN_INFO, "configured for DMA\n"); dev->xfer_mode = XFER_MW_DMA_0; dev->xfer_shift = ATA_SHIFT_MWDMA; @@ -799,7 +794,7 @@ MODULE_VERSION(DRV_VERSION); module_param_named(noraid, it8212_noraid, int, S_IRUGO); -MODULE_PARM_DESC(it8212_noraid, "Force card into bypass mode"); +MODULE_PARM_DESC(noraid, "Force card into bypass mode"); module_init(it821x_init); module_exit(it821x_exit); diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 0439ee951a11..a1240603912c 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -1843,35 +1843,35 @@ static const struct intel_driver_description { &intel_845_driver, &intel_830_driver }, { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", - &intel_845_driver, &intel_915_driver }, + NULL, &intel_915_driver }, { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", - &intel_845_driver, &intel_915_driver }, + NULL, &intel_915_driver }, { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", - &intel_845_driver, &intel_915_driver }, + NULL, &intel_915_driver }, { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 1, "945GM", - &intel_845_driver, &intel_915_driver }, + NULL, &intel_915_driver }, { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", - &intel_845_driver, &intel_915_driver }, + NULL, &intel_915_driver }, { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", - &intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_82965G_1_HB, PCI_DEVICE_ID_INTEL_82965G_1_IG, 0, "965G", - &intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", - &intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", - &intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 1, "965GM", - &intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", - 
&intel_845_driver, &intel_i965_driver }, + NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", - &intel_845_driver, &intel_g33_driver }, + NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", - &intel_845_driver, &intel_g33_driver }, + NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", - &intel_845_driver, &intel_g33_driver }, + NULL, &intel_g33_driver }, { 0, 0, 0, NULL, NULL, NULL } }; @@ -1917,8 +1917,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, } if (bridge->driver == NULL) { - printk(KERN_WARNING PFX "Failed to find bridge device " - "(chip_id: %04x)\n", intel_agp_chipsets[i].gmch_chip_id); + /* bridge has no AGP and no IGD detected */ + if (cap_ptr) + printk(KERN_WARNING PFX "Failed to find bridge device " + "(chip_id: %04x)\n", + intel_agp_chipsets[i].gmch_chip_id); agp_put_bridge(bridge); return -ENODEV; } diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 5d402d63799f..dbb76427d529 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -640,6 +640,7 @@ iso_callback(struct fw_iso_context *context, u32 cycle, static int ioctl_create_iso_context(struct client *client, void *buffer) { struct fw_cdev_create_iso_context *request = buffer; + struct fw_iso_context *context; if (request->channel > 63) return -EINVAL; @@ -661,15 +662,17 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) return -EINVAL; } + context = fw_iso_context_create(client->device->card, + request->type, + request->channel, + request->speed, + request->header_size, + iso_callback, client); + if (IS_ERR(context)) + return PTR_ERR(context); + client->iso_closure = request->closure; - client->iso_context = fw_iso_context_create(client->device->card, - request->type, - request->channel, - request->speed, - request->header_size, - iso_callback, client); - if (IS_ERR(client->iso_context)) - return PTR_ERR(client->iso_context); + client->iso_context = context; /* We only support one context at this time. */ request->handle = 0; diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 0d08bf9b78c2..b72a5c1f9e69 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -1001,7 +1001,7 @@ static irqreturn_t irq_handler(int irq, void *data) event = reg_read(ohci, OHCI1394_IntEventClear); - if (!event) + if (!event || !~event) return IRQ_NONE; reg_write(ohci, OHCI1394_IntEventClear, event); diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 5f026b5d7857..7c13fb3c167b 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -1565,7 +1565,7 @@ static void ether1394_complete_cb(void *__ptask) /* Transmit a packet (called by kernel) */ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev) { - struct eth1394hdr *eth; + struct eth1394hdr hdr_buf; struct eth1394_priv *priv = netdev_priv(dev); __be16 proto; unsigned long flags; @@ -1595,16 +1595,17 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev) if (!skb) goto fail; - /* Get rid of the fake eth1394 header, but save a pointer */ - eth = (struct eth1394hdr *)skb->data; + /* Get rid of the fake eth1394 header, but first make a copy. + * We might need to rebuild the header on tx failure. 
*/ + memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); skb_pull(skb, ETH1394_HLEN); - proto = eth->h_proto; + proto = hdr_buf.h_proto; dg_size = skb->len; /* Set the transmission type for the packet. ARP packets and IP * broadcast packets are sent via GASP. */ - if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 || + if (memcmp(hdr_buf.h_dest, dev->broadcast, ETH1394_ALEN) == 0 || proto == htons(ETH_P_ARP) || (proto == htons(ETH_P_IP) && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { @@ -1616,7 +1617,7 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev) if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF]) priv->bc_dgl++; } else { - __be64 guid = get_unaligned((u64 *)eth->h_dest); + __be64 guid = get_unaligned((u64 *)hdr_buf.h_dest); node = eth1394_find_node_guid(&priv->ip_node_list, be64_to_cpu(guid)); @@ -1673,6 +1674,14 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev) if (dest_node == (LOCAL_BUS | ALL_NODES)) goto fail; + /* At this point we want to restore the packet. When we return + * here with NETDEV_TX_BUSY we will get another entrance in this + * routine with the same skb and we need it to look the same. + * So we pull 4 more bytes, then build the header again. */ + skb_pull(skb, 4); + ether1394_header(skb, dev, ntohs(hdr_buf.h_proto), + hdr_buf.h_dest, NULL, 0); + /* Most failures of ether1394_send_packet are recoverable. */ netif_stop_queue(dev); priv->wake_node = dest_node; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index b2a290c6703a..660b27aecae5 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -354,8 +354,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, if (is_send) { wq = &(*cur_qp)->sq; wqe_ctr = be16_to_cpu(cqe->wqe_index); - wq->tail += wqe_ctr - (u16) wq->tail; - wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)]; + wq->tail += (u16) (wqe_ctr - (u16) wq->tail); + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; } else if ((*cur_qp)->ibqp.srq) { srq = to_msrq((*cur_qp)->ibqp.srq); @@ -364,7 +364,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else { wq = &(*cur_qp)->rq; - wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)]; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; } @@ -478,7 +478,8 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) { u32 prod_index; int nfreed = 0; - struct mlx4_cqe *cqe; + struct mlx4_cqe *cqe, *dest; + u8 owner_bit; /* * First we need to find the current producer index, so we @@ -501,9 +502,13 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); ++nfreed; - } else if (nfreed) - memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), - cqe, sizeof *cqe); + } else if (nfreed) { + dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); + owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK; + memcpy(dest, cqe, sizeof *cqe); + dest->owner_sr_opcode = owner_bit | + (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); + } } if (nfreed) { diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 402f3a20ec0a..1095c82b38c2 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -125,7 +125,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; - props->max_pkeys = dev->dev->caps.pkey_table_len; + props->max_pkeys = dev->dev->caps.pkey_table_len[1]; props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * @@ -168,9 +168,9 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); - props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len; + props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; props->max_msg_sz = 0x80000000; - props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len; + props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; @@ -280,8 +280,14 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, return PTR_ERR(mailbox); memset(mailbox->buf, 0, 256); - *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; - ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); + + if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + *(u8 *) mailbox->buf = !!reset_qkey_viols << 6; + ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask); + } else { + ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols; + ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); + } err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 93dac71f3230..24ccadd6e4f8 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -95,7 +95,8 @@ struct mlx4_ib_mr { struct mlx4_ib_wq { u64 *wrid; spinlock_t lock; - int max; + int wqe_cnt; + int max_post; int max_gs; int offset; int wqe_shift; @@ -113,6 +114,7 @@ struct mlx4_ib_qp { u32 doorbell_qpn; __be32 sq_signal_bits; + int sq_spare_wqes; struct mlx4_ib_wq sq; struct ib_umem *umem; @@ -123,6 +125,7 @@ struct mlx4_ib_qp { u8 alt_port; u8 atomic_rd_en; u8 resp_depth; + u8 sq_no_prefetch; u8 state; }; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 5c6d05427a0f..28a08bdd1800 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -109,6 +109,20 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); } +/* + * Stamp a SQ WQE so that it is invalid if prefetched by marking the + * first four bytes of every 64 byte chunk with 0xffffffff, except for + * the very first chunk of the WQE. 
+ */ +static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) +{ + u32 *wqe = get_send_wqe(qp, n); + int i; + + for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16) + wqe[i] = 0xffffffff; +} + static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) { struct ib_event event; @@ -178,6 +192,8 @@ static int send_wqe_overhead(enum ib_qp_type type) case IB_QPT_GSI: return sizeof (struct mlx4_wqe_ctrl_seg) + ALIGN(MLX4_IB_UD_HEADER_SIZE + + DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, + MLX4_INLINE_ALIGN) * sizeof (struct mlx4_wqe_inline_seg), sizeof (struct mlx4_wqe_data_seg)) + ALIGN(4 + @@ -201,18 +217,18 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, if (cap->max_recv_wr) return -EINVAL; - qp->rq.max = qp->rq.max_gs = 0; + qp->rq.wqe_cnt = qp->rq.max_gs = 0; } else { /* HW requires >= 1 RQ entry with >= 1 gather entry */ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) return -EINVAL; - qp->rq.max = roundup_pow_of_two(max(1, cap->max_recv_wr)); - qp->rq.max_gs = roundup_pow_of_two(max(1, cap->max_recv_sge)); + qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); + qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } - cap->max_recv_wr = qp->rq.max; + cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; cap->max_recv_sge = qp->rq.max_gs; return 0; @@ -236,8 +252,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) return -EINVAL; - qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 1; - qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), cap->max_inline_data + @@ -246,20 +260,27 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg); - qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) + - (qp->sq.max << qp->sq.wqe_shift); + /* + * We need to leave 2 KB + 1 WQE of headroom in the SQ to + * allow HW to prefetch. 
+ */ + qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1; + qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes); + + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + + (qp->sq.wqe_cnt << qp->sq.wqe_shift); if (qp->rq.wqe_shift > qp->sq.wqe_shift) { qp->rq.offset = 0; - qp->sq.offset = qp->rq.max << qp->rq.wqe_shift; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; } else { - qp->rq.offset = qp->sq.max << qp->sq.wqe_shift; + qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; qp->sq.offset = 0; } - cap->max_send_wr = qp->sq.max; - cap->max_send_sge = qp->sq.max_gs; - cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) - - sizeof (struct mlx4_wqe_inline_seg); + cap->max_send_wr = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes; + cap->max_send_sge = qp->sq.max_gs; + /* We don't support inline sends for kernel QPs (yet) */ + cap->max_inline_data = 0; return 0; } @@ -267,11 +288,11 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, static int set_user_sq_size(struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { - qp->sq.max = 1 << ucmd->log_sq_bb_count; + qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; qp->sq.wqe_shift = ucmd->log_sq_stride; - qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) + - (qp->sq.max << qp->sq.wqe_shift); + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + + (qp->sq.wqe_cnt << qp->sq.wqe_shift); return 0; } @@ -307,6 +328,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err; } + qp->sq_no_prefetch = ucmd.sq_no_prefetch; + err = set_user_sq_size(qp, &ucmd); if (err) goto err; @@ -334,6 +357,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_mtt; } } else { + qp->sq_no_prefetch = 0; + err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp); if (err) goto err; @@ -360,16 +385,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (err) goto err_mtt; - qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL); - qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL); + qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); + qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; } - - /* We don't support inline sends for kernel QPs (yet) */ - init_attr->cap.max_inline_data = 0; } err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp); @@ -583,24 +605,6 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) return 0; } -static void init_port(struct mlx4_ib_dev *dev, int port) -{ - struct mlx4_init_port_param param; - int err; - - memset(¶m, 0, sizeof param); - - param.port_width_cap = dev->dev->caps.port_width_cap; - param.vl_cap = dev->dev->caps.vl_cap; - param.mtu = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap); - param.max_gid = dev->dev->caps.gid_table_len; - param.max_pkey = dev->dev->caps.pkey_table_len; - - err = mlx4_INIT_PORT(dev->dev, ¶m, port); - if (err) - printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err); -} - static int to_mlx4_st(enum ib_qp_type type) { switch (type) { @@ -674,9 +678,9 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, path->counter_index = 0xff; if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) { + if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { printk(KERN_ERR "sgid_index (%u) too large. 
max is %d\n", - ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1); + ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); return -1; } @@ -743,14 +747,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, context->mtu_msgmax = (attr->path_mtu << 5) | 31; } - if (qp->rq.max) - context->rq_size_stride = ilog2(qp->rq.max) << 3; + if (qp->rq.wqe_cnt) + context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; context->rq_size_stride |= qp->rq.wqe_shift - 4; - if (qp->sq.max) - context->sq_size_stride = ilog2(qp->sq.max) << 3; + if (qp->sq.wqe_cnt) + context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; context->sq_size_stride |= qp->sq.wqe_shift - 4; + if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->sq_size_stride |= !!qp->sq_no_prefetch << 7; + if (qp->ibqp.uobject) context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); else @@ -789,13 +796,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } if (attr_mask & IB_QP_ALT_PATH) { - if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len) - return -EINVAL; - if (attr->alt_port_num == 0 || attr->alt_port_num > dev->dev->caps.num_ports) return -EINVAL; + if (attr->alt_pkey_index >= + dev->dev->caps.pkey_table_len[attr->alt_port_num]) + return -EINVAL; + if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, attr->alt_port_num)) return -EINVAL; @@ -884,16 +892,19 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, /* * Before passing a kernel QP to the HW, make sure that the - * ownership bits of the send queue are set so that the - * hardware doesn't start processing stale work requests. + * ownership bits of the send queue are set and the SQ + * headroom is stamped so that the hardware doesn't start + * processing stale work requests. */ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { struct mlx4_wqe_ctrl_seg *ctrl; int i; - for (i = 0; i < qp->sq.max; ++i) { + for (i = 0; i < qp->sq.wqe_cnt; ++i) { ctrl = get_send_wqe(qp, i); ctrl->owner_opcode = cpu_to_be32(1 << 31); + + stamp_send_wqe(qp, i); } } @@ -923,7 +934,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) - init_port(dev, qp->port); + if (mlx4_INIT_PORT(dev->dev, qp->port)) + printk(KERN_WARNING "INIT_PORT failed for port %d\n", + qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) @@ -986,16 +999,17 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) goto out; - if ((attr_mask & IB_QP_PKEY_INDEX) && - attr->pkey_index >= dev->dev->caps.pkey_table_len) { - goto out; - } - if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { goto out; } + if (attr_mask & IB_QP_PKEY_INDEX) { + int p = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; + if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) + goto out; + } + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { goto out; @@ -1037,6 +1051,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, u16 pkey; int send_size; int header_size; + int spc; int i; send_size = 0; @@ -1112,10 +1127,43 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, printk("\n"); } - inl->byte_count = cpu_to_be32(1 << 31 | header_size); - memcpy(inl + 1, sqp->header_buf, header_size); + /* + * Inline data segments may not cross a 64 byte boundary. If + * our UD header is bigger than the space available up to the + * next 64 byte boundary in the WQE, use two inline data + * segments to hold the UD header. + */ + spc = MLX4_INLINE_ALIGN - + ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); + if (header_size <= spc) { + inl->byte_count = cpu_to_be32(1 << 31 | header_size); + memcpy(inl + 1, sqp->header_buf, header_size); + i = 1; + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + memcpy(inl + 1, sqp->header_buf, spc); - return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); + inl = (void *) (inl + 1) + spc; + memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); + /* + * Need a barrier here to make sure all the data is + * visible before the byte_count field is set. + * Otherwise the HCA prefetcher could grab the 64-byte + * chunk with this inline segment and get a valid (!= + * 0xffffffff) byte count but stale data, and end up + * generating a packet with bad headers. + * + * The first inline segment's byte_count field doesn't + * need a barrier, because it comes after a + * control/MLX segment and therefore is at an offset + * of 16 mod 64. + */ + wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); + i = 2; + } + + return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); } static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) @@ -1124,7 +1172,7 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq struct mlx4_ib_cq *cq; cur = wq->head - wq->tail; - if (likely(cur + nreq < wq->max)) + if (likely(cur + nreq < wq->max_post)) return 0; cq = to_mcq(ib_cq); @@ -1132,7 +1180,7 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq cur = wq->head - wq->tail; spin_unlock(&cq->lock); - return cur + nreq >= wq->max; + return cur + nreq >= wq->max_post; } int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, @@ -1165,8 +1213,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.max - 1)); - qp->sq.wrid[ind & (qp->sq.max - 1)] = wr->wr_id; + ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); + qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id; ctrl->srcrb_flags = (wr->send_flags & IB_SEND_SIGNALED ? @@ -1301,7 +1349,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | - (ind & qp->sq.max ? cpu_to_be32(1 << 31) : 0); + (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); + + /* + * We can improve latency by not stamping the last + * send queue WQE until after ringing the doorbell, so + * only stamp here if there are still more WQEs to post. 
+ */ + if (wr->next) + stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) & + (qp->sq.wqe_cnt - 1)); ++ind; } @@ -1324,6 +1381,9 @@ out: * and reach the HCA out of order. */ mmiowb(); + + stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) & + (qp->sq.wqe_cnt - 1)); } spin_unlock_irqrestore(&qp->rq.lock, flags); @@ -1344,7 +1404,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qp->rq.lock, flags); - ind = qp->rq.head & (qp->rq.max - 1); + ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) { @@ -1375,7 +1435,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, qp->rq.wrid[ind] = wr->wr_id; - ind = (ind + 1) & (qp->rq.max - 1); + ind = (ind + 1) & (qp->rq.wqe_cnt - 1); } out: diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h index 88c72d56368b..e2d11be4525c 100644 --- a/drivers/infiniband/hw/mlx4/user.h +++ b/drivers/infiniband/hw/mlx4/user.h @@ -39,7 +39,7 @@ * Increment this value if any changes that break userspace ABI * compatibility are made. */ -#define MLX4_IB_UVERBS_ABI_VERSION 2 +#define MLX4_IB_UVERBS_ABI_VERSION 3 /* * Make sure that all structs defined in this file remain laid out so @@ -87,9 +87,10 @@ struct mlx4_ib_create_srq_resp { struct mlx4_ib_create_qp { __u64 buf_addr; __u64 db_addr; - __u8 log_sq_bb_count; - __u8 log_sq_stride; - __u8 reserved[6]; + __u8 log_sq_bb_count; + __u8 log_sq_stride; + __u8 sq_no_prefetch; + __u8 reserved[5]; }; #endif /* MLX4_IB_USER_H */ diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index bd707b86c114..c97d5eb0075d 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -164,6 +164,9 @@ config KEYBOARD_AMIGA To compile this driver as a module, choose M here: the module will be called amikbd. +config ATARI_KBD_CORE + bool + config KEYBOARD_ATARI tristate "Atari keyboard" depends on ATARI diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index ee699a7d6214..0852d330c265 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -2,7 +2,7 @@ menuconfig MACINTOSH_DRIVERS bool "Macintosh device drivers" depends on PPC || MAC || X86 - default y + default y if MAC if MACINTOSH_DRIVERS diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7e03f41ae2c2..f829e4ad8b49 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2303,19 +2303,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) } /* - * set link state for bonding master: if we have an active partnered + * set link state for bonding master: if we have an active * aggregator, we're up, if not, we're down. Presumes that we cannot * have an active aggregator if there are no slaves with link up. * + * This behavior complies with IEEE 802.3 section 43.3.9. + * * Called by bond_set_carrier(). Return zero if carrier state does not * change, nonzero if it does. 
*/ int bond_3ad_set_carrier(struct bonding *bond) { - struct aggregator *agg; - - agg = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator)); - if (agg && MAC_ADDRESS_COMPARE(&agg->partner_system, &null_mac_addr)) { + if (__get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator))) { if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); return 1; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 223517dcbcfd..6287ffbda7f7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4345,8 +4345,8 @@ static void bond_free_all(void) bond_mc_list_destroy(bond); /* Release the bonded slaves */ bond_release_all(bond_dev); - unregister_netdevice(bond_dev); bond_deinit(bond_dev); + unregister_netdevice(bond_dev); } #ifdef CONFIG_PROC_FS diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index a122baa5c7bb..60cccf2aa959 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -164,9 +164,9 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t printk(KERN_INFO DRV_NAME ": %s is being deleted...\n", bond->dev->name); - unregister_netdevice(bond->dev); bond_deinit(bond->dev); bond_destroy_sysfs_entry(bond); + unregister_netdevice(bond->dev); rtnl_unlock(); goto out; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 41aa78bf1f78..a89102116ccb 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -22,8 +22,8 @@ #include "bond_3ad.h" #include "bond_alb.h" -#define DRV_VERSION "3.1.2" -#define DRV_RELDATE "January 20, 2007" +#define DRV_VERSION "3.1.3" +#define DRV_RELDATE "June 13, 2007" #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c index 73a41e6a5bfc..ee140e63ddc5 100644 --- a/drivers/net/cxgb3/ael1002.c +++ b/drivers/net/cxgb3/ael1002.c @@ -219,7 +219,13 @@ static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, unsigned int status; status = t3_read_reg(phy->adapter, - XGM_REG(A_XGM_SERDES_STAT0, phy->addr)); + XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT3, phy->addr)); *link_ok = !(status & F_LOWSIG0); } if (speed) @@ -247,5 +253,5 @@ static struct cphy_ops xaui_direct_ops = { void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { - cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops); + cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops); } diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 1b20f4060e2d..d8a1f5452c51 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2071,10 +2071,20 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) static void cxgb_netpoll(struct net_device *dev) { struct adapter *adapter = dev->priv; - struct sge_qset *qs = dev2qset(dev); + struct port_info *pi = netdev_priv(dev); + int qidx; - t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq, - adapter); + for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { + struct sge_qset *qs = &adapter->sge.qs[qidx]; + void *source; + + if (adapter->flags & USING_MSIX) + source = qs; 
+ else + source = adapter; + + t3_intr_handler(adapter, qs->rspq.polling) (0, source); + } } #endif diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index e5a553410e24..020859c855d7 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -1882,6 +1882,10 @@ #define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES) #define F_COPYALLFRAMES V_COPYALLFRAMES(1U) +#define S_DISBCAST 1 +#define V_DISBCAST(x) ((x) << S_DISBCAST) +#define F_DISBCAST V_DISBCAST(1U) + #define A_XGM_RX_HASH_LOW 0x814 #define A_XGM_RX_HASH_HIGH 0x818 @@ -2128,6 +2132,8 @@ #define F_RESETPLL01 V_RESETPLL01(1U) #define A_XGM_SERDES_STAT0 0x8f0 +#define A_XGM_SERDES_STAT1 0x8f4 +#define A_XGM_SERDES_STAT2 0x8f8 #define S_LOWSIG0 0 #define V_LOWSIG0(x) ((x) << S_LOWSIG0) diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 3666586a4831..a60ec4d4707c 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1690,8 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq, struct port_info *pi; skb_pull(skb, sizeof(*p) + pad); - skb->dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, adap->port[p->iff]); + skb->dev->last_rx = jiffies; pi = netdev_priv(skb->dev); if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && !p->fragment) { @@ -2217,7 +2217,6 @@ irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); - BUG_ON(napi_is_scheduled(qs->netdev)); if (handle_responses(adap, q) < 0) q->unhandled_irqs++; diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index a506792f9575..b261be147e7b 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ -231,6 +231,28 @@ int t3_mac_set_num_ucast(struct cmac *mac, int n) return 0; } +static void disable_exact_filters(struct cmac *mac) +{ + unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1; + + for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { + u32 v = t3_read_reg(mac->adapter, reg); + t3_write_reg(mac->adapter, reg, v); + } + t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ +} + +static void enable_exact_filters(struct cmac *mac) +{ + unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1; + + for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { + u32 v = t3_read_reg(mac->adapter, reg); + t3_write_reg(mac->adapter, reg, v); + } + t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ +} + /* Calculate the RX hash filter index of an Ethernet address */ static int hash_hw_addr(const u8 * addr) { @@ -281,6 +303,14 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm) return 0; } +static int rx_fifo_hwm(int mtu) +{ + int hwm; + + hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100); + return min(hwm, MAC_RXFIFO_SIZE - 8192); +} + int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) { int hwm, lwm; @@ -306,11 +336,38 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset); + if (adap->params.rev == T3_REV_B2 && + (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { + disable_exact_filters(mac); + t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + mac->offset, + F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); + + /* drain rx FIFO */ + if (t3_wait_op_done(adap, + A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + + mac->offset, + 1 << 31, 1, 20, 5)) { + t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v); + enable_exact_filters(mac); + return -EIO; + } + 
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + enable_exact_filters(mac); + } else + t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + + /* + * Adjust the PAUSE frame watermarks. We always set the LWM, and the + * HWM only if flow-control is enabled. + */ + hwm = rx_fifo_hwm(mtu); + lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM); v |= V_RXFIFOPAUSELWM(lwm / 8); if (G_RXFIFOPAUSEHWM(v)) v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) | V_RXFIFOPAUSEHWM(hwm / 8); + t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v); /* Adjust the TX FIFO threshold based on the MTU */ @@ -329,7 +386,6 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) (hwm - lwm) * 4 / 8); t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, MAC_RXFIFO_SIZE * 4 * 8 / 512); - return 0; } @@ -357,6 +413,15 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) V_PORTSPEED(M_PORTSPEED), val); } + val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft); + val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM); + if (fc & PAUSE_TX) + val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm( + t3_read_reg(adap, + A_XGM_RX_MAX_PKT_SIZE + + oft)) / 8); + t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val); + t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, (fc & PAUSE_RX) ? F_TXPAUSEEN : 0); return 0; @@ -436,6 +501,10 @@ int t3b2_mac_watchdog_task(struct cmac *mac) unsigned int rx_xcnt; int status; + status = 0; + tx_xcnt = 1; /* By default tx_xcnt is making progress */ + tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ + rx_xcnt = 1; /* By default rx_xcnt is making progress */ if (tx_mcnt == mac->tx_mcnt) { tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_TX_SPI4_SOP_EOP_CNT + @@ -446,37 +515,44 @@ int t3b2_mac_watchdog_task(struct cmac *mac) tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, A_TP_PIO_DATA))); } else { - mac->toggle_cnt = 0; - return 0; + goto rxcheck; } } else { mac->toggle_cnt = 0; - return 0; + goto rxcheck; } if (((tx_tcnt != mac->tx_tcnt) && (tx_xcnt == 0) && (mac->tx_xcnt == 0)) || ((mac->tx_mcnt == tx_mcnt) && (tx_xcnt != 0) && (mac->tx_xcnt != 0))) { - if (mac->toggle_cnt > 4) + if (mac->toggle_cnt > 4) { status = 2; - else + goto out; + } else { status = 1; + goto out; + } } else { mac->toggle_cnt = 0; - return 0; + goto rxcheck; } +rxcheck: if (rx_mcnt != mac->rx_mcnt) rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_RX_SPI4_SOP_EOP_CNT + mac->offset))); - else - return 0; + else + goto out; - if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) + if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && + mac->rx_xcnt == 0) { status = 2; - + goto out; + } + +out: mac->tx_tcnt = tx_tcnt; mac->tx_xcnt = tx_xcnt; mac->tx_mcnt = s->tx_frames; diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 32788ca40d25..42ba1c012ee2 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -4825,8 +4825,10 @@ static int nv_close(struct net_device *dev) drain_ring(dev); - if (np->wolenabled) + if (np->wolenabled) { + writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); nv_start_rx(dev); + } /* FIXME: power down nic */ diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index 638a279ec505..9853c74f6bbf 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c @@ -240,7 +240,7 @@ static int __init mipsnet_probe(struct device *dev) * TODO: probe for these or load them from PARAM */ netdev->base_addr = 0x4200; - netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 + 
+ netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + inl(mipsnet_reg_address(netdev, interruptInfo)); // Get the io region now, get irq on open() diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index e7ca118c8dfd..d2b065351e45 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -38,7 +38,9 @@ #include "icm.h" enum { - MLX4_COMMAND_INTERFACE_REV = 1 + MLX4_COMMAND_INTERFACE_MIN_REV = 2, + MLX4_COMMAND_INTERFACE_MAX_REV = 3, + MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, }; extern void __buggy_use_of_MLX4_GET(void); @@ -107,6 +109,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) u16 size; u16 stat_rate; int err; + int i; #define QUERY_DEV_CAP_OUT_SIZE 0x100 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 @@ -176,7 +179,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A); - if (err) goto out; @@ -216,18 +218,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_rdma_global = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); dev_cap->local_ca_ack_delay = field & 0x1f; - MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); - dev_cap->max_mtu = field >> 4; - dev_cap->max_port_width = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); - dev_cap->max_vl = field >> 4; dev_cap->num_ports = field & 0xf; - MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); - dev_cap->max_gids = 1 << (field & 0xf); MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; - MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); - dev_cap->max_pkeys = 1 << (field & 0xf); MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); dev_cap->reserved_uars = field >> 4; @@ -304,6 +298,42 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + for (i = 1; i <= dev_cap->num_ports; ++i) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->max_vl[i] = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + dev_cap->max_mtu[i] = field >> 4; + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + dev_cap->max_gids[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + } + } else { +#define QUERY_PORT_MTU_OFFSET 0x01 +#define QUERY_PORT_WIDTH_OFFSET 0x06 +#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 +#define QUERY_PORT_MAX_VL_OFFSET 0x0b + + for (i = 1; i <= dev_cap->num_ports; ++i) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + dev_cap->max_mtu[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + dev_cap->max_port_width[i] = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); + dev_cap->max_gids[i] = 1 << (field >> 4); + dev_cap->max_pkeys[i] = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + dev_cap->max_vl[i] = field & 0xf; + } + } + if (dev_cap->bmme_flags & 1) mlx4_dbg(dev, "Base MM extensions: yes " "(flags %d, rsvd L_Key %08x)\n", @@ -338,8 +368,8 @@ int 
mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", - dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu, - dev_cap->max_port_width); + dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], + dev_cap->max_port_width[1]); mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", @@ -491,7 +521,8 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0x0000ffffull) << 16); MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); - if (cmd_if_rev != MLX4_COMMAND_INTERFACE_REV) { + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || + cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { mlx4_err(dev, "Installed FW has unsupported " "command interface revision %d.\n", cmd_if_rev); @@ -499,12 +530,15 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) (int) (dev->caps.fw_ver >> 32), (int) (dev->caps.fw_ver >> 16) & 0xffff, (int) dev->caps.fw_ver & 0xffff); - mlx4_err(dev, "This driver version supports only revision %d.\n", - MLX4_COMMAND_INTERFACE_REV); + mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", + MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); err = -ENODEV; goto out; } + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) + dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); cmd->max_cmds = 1 << lg; @@ -708,13 +742,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) return err; } -int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port) +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) { struct mlx4_cmd_mailbox *mailbox; u32 *inbox; int err; u32 flags; + u16 field; + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { #define INIT_PORT_IN_SIZE 256 #define INIT_PORT_FLAGS_OFFSET 0x00 #define INIT_PORT_FLAG_SIG (1 << 18) @@ -729,32 +765,32 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int #define INIT_PORT_NODE_GUID_OFFSET 0x18 #define INIT_PORT_SI_GUID_OFFSET 0x20 - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - inbox = mailbox->buf; + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; - memset(inbox, 0, INIT_PORT_IN_SIZE); + memset(inbox, 0, INIT_PORT_IN_SIZE); - flags = 0; - flags |= param->set_guid0 ? INIT_PORT_FLAG_G0 : 0; - flags |= param->set_node_guid ? INIT_PORT_FLAG_NG : 0; - flags |= param->set_si_guid ? 
INIT_PORT_FLAG_SIG : 0; - flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT; - flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; - MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + flags = 0; + flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); - MLX4_PUT(inbox, param->mtu, INIT_PORT_MTU_OFFSET); - MLX4_PUT(inbox, param->max_gid, INIT_PORT_MAX_GID_OFFSET); - MLX4_PUT(inbox, param->max_pkey, INIT_PORT_MAX_PKEY_OFFSET); - MLX4_PUT(inbox, param->guid0, INIT_PORT_GUID0_OFFSET); - MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET); - MLX4_PUT(inbox, param->si_guid, INIT_PORT_SI_GUID_OFFSET); + field = 128 << dev->caps.mtu_cap[port]; + MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); + field = dev->caps.gid_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); + field = dev->caps.pkey_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); - err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, - MLX4_CMD_TIME_CLASS_A); + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); - mlx4_free_cmd_mailbox(dev, mailbox); + mlx4_free_cmd_mailbox(dev, mailbox); + } else + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A); return err; } diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 2616fa53d4d0..296254ac27c1 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h @@ -59,13 +59,13 @@ struct mlx4_dev_cap { int max_responder_per_qp; int max_rdma_global; int local_ca_ack_delay; - int max_mtu; - int max_port_width; - int max_vl; int num_ports; - int max_gids; + int max_mtu[MLX4_MAX_PORTS + 1]; + int max_port_width[MLX4_MAX_PORTS + 1]; + int max_vl[MLX4_MAX_PORTS + 1]; + int max_gids[MLX4_MAX_PORTS + 1]; + int max_pkeys[MLX4_MAX_PORTS + 1]; u16 stat_rate_support; - int max_pkeys; u32 flags; int reserved_uars; int uar_size; diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index d4172937025b..41eafebf5823 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -88,6 +88,7 @@ static struct mlx4_profile default_profile = { static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; + int i; err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { @@ -117,11 +118,15 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev } dev->caps.num_ports = dev_cap->num_ports; + for (i = 1; i <= dev->caps.num_ports; ++i) { + dev->caps.vl_cap[i] = dev_cap->max_vl[i]; + dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; + dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; + dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; + dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; + } + dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; - dev->caps.vl_cap = dev_cap->max_vl; - dev->caps.mtu_cap = dev_cap->max_mtu; - dev->caps.gid_table_len = dev_cap->max_gids; - dev->caps.pkey_table_len = dev_cap->max_pkeys; dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; dev->caps.bf_reg_size = dev_cap->bf_reg_size; dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; @@ -148,7 +153,6 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; - dev->caps.port_width_cap = 
dev_cap->max_port_width; dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 4cf0d3fcb519..460a08718c69 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -690,7 +690,7 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev, { struct netdev_private *np = netdev_priv(to_net_dev(dev)); int new_setting; - u32 flags; + unsigned long flags; /* Find out the new setting */ if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index b47ad1df2e0c..7a4aa6a9f949 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -460,13 +460,9 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; } else { hwdescr->buf_addr = buf; - hwdescr->next_descr_addr = 0; wmb(); hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOINTR_COMPLETE; - - wmb(); - descr->prev->hwdescr->next_descr_addr = descr->bus_addr; } return 0; @@ -541,12 +537,16 @@ spider_net_refill_rx_chain(struct spider_net_card *card) static int spider_net_alloc_rx_skbs(struct spider_net_card *card) { - int result; - struct spider_net_descr_chain *chain; + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *start = chain->tail; + struct spider_net_descr *descr = start; - result = -ENOMEM; + /* Link up the hardware chain pointers */ + do { + descr->prev->hwdescr->next_descr_addr = descr->bus_addr; + descr = descr->next; + } while (descr != start); - chain = &card->rx_chain; /* Put at least one buffer into the chain. if this fails, * we've got a problem. If not, spider_net_refill_rx_chain * will do the rest at the end of this function. */ @@ -563,7 +563,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card) error: spider_net_free_rx_chain_contents(card); - return result; + return -ENOMEM; } /** @@ -718,7 +718,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; spin_unlock_irqrestore(&chain->lock, flags); - if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL) + if (skb->ip_summed == CHECKSUM_PARTIAL) switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; @@ -1051,6 +1051,66 @@ static void show_rx_chain(struct spider_net_card *card) #endif /** + * spider_net_resync_head_ptr - Advance head ptr past empty descrs + * + * If the driver fails to keep up and empty the queue, then the + * hardware wil run out of room to put incoming packets. This + * will cause the hardware to skip descrs that are full (instead + * of halting/retrying). Thus, once the driver runs, it wil need + * to "catch up" to where the hardware chain pointer is at. 
+ */ +static void spider_net_resync_head_ptr(struct spider_net_card *card) +{ + unsigned long flags; + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *descr; + int i, status; + + /* Advance head pointer past any empty descrs */ + descr = chain->head; + status = spider_net_get_descr_status(descr->hwdescr); + + if (status == SPIDER_NET_DESCR_NOT_IN_USE) + return; + + spin_lock_irqsave(&chain->lock, flags); + + descr = chain->head; + status = spider_net_get_descr_status(descr->hwdescr); + for (i=0; i<chain->num_desc; i++) { + if (status != SPIDER_NET_DESCR_CARDOWNED) break; + descr = descr->next; + status = spider_net_get_descr_status(descr->hwdescr); + } + chain->head = descr; + + spin_unlock_irqrestore(&chain->lock, flags); +} + +static int spider_net_resync_tail_ptr(struct spider_net_card *card) +{ + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *descr; + int i, status; + + /* Advance tail pointer past any empty and reaped descrs */ + descr = chain->tail; + status = spider_net_get_descr_status(descr->hwdescr); + + for (i=0; i<chain->num_desc; i++) { + if ((status != SPIDER_NET_DESCR_CARDOWNED) && + (status != SPIDER_NET_DESCR_NOT_IN_USE)) break; + descr = descr->next; + status = spider_net_get_descr_status(descr->hwdescr); + } + chain->tail = descr; + + if ((i == chain->num_desc) || (i == 0)) + return 1; + return 0; +} + +/** * spider_net_decode_one_descr - processes an RX descriptor * @card: card structure * @@ -1112,7 +1172,7 @@ spider_net_decode_one_descr(struct spider_net_card *card) goto bad_desc; } - if (hwdescr->dmac_cmd_status & 0xfefe) { + if (hwdescr->dmac_cmd_status & 0xfcf4) { pr_err("%s: bad status, cmd_status=x%08x\n", card->netdev->name, hwdescr->dmac_cmd_status); @@ -1131,6 +1191,7 @@ spider_net_decode_one_descr(struct spider_net_card *card) /* Ok, we've got a packet in descr */ spider_net_pass_skb_up(descr, card); + descr->skb = NULL; hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; return 1; @@ -1174,6 +1235,12 @@ spider_net_poll(struct net_device *netdev, int *budget) } } + if ((packets_done == 0) && (card->num_rx_ints != 0)) { + no_more_packets = spider_net_resync_tail_ptr(card); + spider_net_resync_head_ptr(card); + } + card->num_rx_ints = 0; + netdev->quota -= packets_done; *budget -= packets_done; spider_net_refill_rx_chain(card); @@ -1184,6 +1251,7 @@ spider_net_poll(struct net_device *netdev, int *budget) if (no_more_packets) { netif_rx_complete(netdev); spider_net_rx_irq_on(card); + card->ignore_rx_ramfull = 0; return 0; } @@ -1417,11 +1485,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) case SPIDER_NET_GRFBFLLINT: /* fallthrough */ case SPIDER_NET_GRFAFLLINT: /* fallthrough */ case SPIDER_NET_GRMFLLINT: - if (netif_msg_intr(card) && net_ratelimit()) - pr_err("Spider RX RAM full, incoming packets " - "might be discarded!\n"); - spider_net_rx_irq_off(card); - netif_rx_schedule(card->netdev); + /* Could happen when rx chain is full */ + if (card->ignore_rx_ramfull == 0) { + card->ignore_rx_ramfull = 1; + spider_net_resync_head_ptr(card); + spider_net_refill_rx_chain(card); + spider_net_enable_rxdmac(card); + card->num_rx_ints ++; + netif_rx_schedule(card->netdev); + } show_error = 0; break; @@ -1436,12 +1508,11 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) case SPIDER_NET_GDCDCEINT: /* fallthrough */ case SPIDER_NET_GDBDCEINT: /* fallthrough */ case SPIDER_NET_GDADCEINT: - if (netif_msg_intr(card) && net_ratelimit()) - 
pr_err("got descriptor chain end interrupt, " - "restarting DMAC %c.\n", - 'D'-(i-SPIDER_NET_GDDDCEINT)/3); + spider_net_resync_head_ptr(card); spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); + card->num_rx_ints ++; + netif_rx_schedule(card->netdev); show_error = 0; break; @@ -1450,9 +1521,12 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) case SPIDER_NET_GDCINVDINT: /* fallthrough */ case SPIDER_NET_GDBINVDINT: /* fallthrough */ case SPIDER_NET_GDAINVDINT: - /* could happen when rx chain is full */ + /* Could happen when rx chain is full */ + spider_net_resync_head_ptr(card); spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); + card->num_rx_ints ++; + netif_rx_schedule(card->netdev); show_error = 0; break; @@ -1545,6 +1619,7 @@ spider_net_interrupt(int irq, void *ptr) if (status_reg & SPIDER_NET_RXINT ) { spider_net_rx_irq_off(card); netif_rx_schedule(netdev); + card->num_rx_ints ++; } if (status_reg & SPIDER_NET_TXINT) netif_rx_schedule(netdev); @@ -2185,11 +2260,13 @@ spider_net_setup_netdev(struct spider_net_card *card) spider_net_setup_netdev_ops(netdev); - netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; + netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | * NETIF_F_HW_VLAN_FILTER */ netdev->irq = card->pdev->irq; + card->num_rx_ints = 0; + card->ignore_rx_ramfull = 0; dn = pci_device_to_OF_node(card->pdev); if (!dn) diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 4a1e0d28a502..1d054aa71504 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -25,7 +25,7 @@ #ifndef _SPIDER_NET_H #define _SPIDER_NET_H -#define VERSION "2.0 A" +#define VERSION "2.0 B" #include "sungem_phy.h" @@ -222,6 +222,7 @@ extern char spider_net_driver_name[]; #define SPIDER_NET_GDTBSTA 0x00000300 #define SPIDER_NET_GDTDCEIDIS 0x00000002 #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ + SPIDER_NET_GDTDCEIDIS | \ SPIDER_NET_GDTBSTA #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 @@ -332,8 +333,7 @@ enum spider_net_int2_status { SPIDER_NET_GRISPDNGINT }; -#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GDTFDCINT) | \ - (1 << SPIDER_NET_GDTDCEINT) ) +#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT) /* We rely on flagged descriptor interrupts */ #define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) ) @@ -461,6 +461,8 @@ struct spider_net_card { struct work_struct tx_timeout_task; atomic_t tx_timeout_task_counter; wait_queue_head_t waitq; + int num_rx_ints; + int ignore_rx_ramfull; /* for ethtool */ int msg_enable; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index 6bcf03fc89be..d940474e024a 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -134,22 +134,6 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n) return 0; } -static uint32_t -spider_net_ethtool_get_tx_csum(struct net_device *netdev) -{ - return (netdev->features & NETIF_F_HW_CSUM) != 0; -} - -static int -spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) -{ - if (data) - netdev->features |= NETIF_F_HW_CSUM; - else - netdev->features &= ~NETIF_F_HW_CSUM; - - return 0; -} static void spider_net_ethtool_get_ringparam(struct net_device *netdev, @@ -200,11 +184,12 @@ const struct ethtool_ops spider_net_ethtool_ops = { .get_wol = spider_net_ethtool_get_wol, .get_msglevel = spider_net_ethtool_get_msglevel, .set_msglevel = spider_net_ethtool_set_msglevel, + .get_link = 
ethtool_op_get_link, .nway_reset = spider_net_ethtool_nway_reset, .get_rx_csum = spider_net_ethtool_get_rx_csum, .set_rx_csum = spider_net_ethtool_set_rx_csum, - .get_tx_csum = spider_net_ethtool_get_tx_csum, - .set_tx_csum = spider_net_ethtool_set_tx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, .get_ringparam = spider_net_ethtool_get_ringparam, .get_strings = spider_net_get_strings, .get_stats_count = spider_net_get_stats_count, diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 98be2880757d..e5d7ed92d6f7 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -195,12 +195,6 @@ static int led_proc_write(struct file *file, const char *buf, cur = lbuf; - /* skip initial spaces */ - while (*cur && isspace(*cur)) - { - cur++; - } - switch ((long)data) { case LED_NOLCD: diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 66eb0688d523..4e711a985d59 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -267,7 +267,9 @@ struct zcore_header { u64 tod; cpuid_t cpu_id; u32 arch_id; + u32 volnr; u32 build_arch; + u64 rmem_size; char pad2[4016]; } __attribute__((packed,__aligned__(16))); @@ -559,6 +561,7 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr) else hdr->arch_id = DUMP_ARCH_S390; hdr->mem_size = sys_info.mem_size; + hdr->rmem_size = sys_info.mem_size; hdr->mem_end = sys_info.mem_size; hdr->num_pages = sys_info.mem_size / PAGE_SIZE; hdr->tod = get_clock(); diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 6dd64d0c8d45..348bb7b82771 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -3912,6 +3912,7 @@ static int add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) { struct chbk *p_ch; + struct ccw_dev_id dev_id; #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__); @@ -3921,7 +3922,8 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) p_ch = &privptr->channel[i]; p_ch->cdev = cdev; snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id); - sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno); + ccw_device_get_id(cdev, &dev_id); + p_ch->devno = dev_id.devno; if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { printk(KERN_WARNING "%s Out of memory in %s for irb\n", p_ch->id,__FUNCTION__); @@ -3955,6 +3957,7 @@ claw_new_device(struct ccwgroup_device *cgdev) struct claw_env *p_env; struct net_device *dev; int ret; + struct ccw_dev_id dev_id; pr_debug("%s() called\n", __FUNCTION__); printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); @@ -3965,10 +3968,10 @@ claw_new_device(struct ccwgroup_device *cgdev) if (!privptr) return -ENODEV; p_env = privptr->p_env; - sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x", - &p_env->devno[READ]); - sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x", - &p_env->devno[WRITE]); + ccw_device_get_id(cgdev->cdev[READ], &dev_id); + p_env->devno[READ] = dev_id.devno; + ccw_device_get_id(cgdev->cdev[WRITE], &dev_id); + p_env->devno[WRITE] = dev_id.devno; ret = add_channel(cgdev->cdev[0],0,privptr); if (ret == 0) ret = add_channel(cgdev->cdev[1],1,privptr); diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index c358764f3264..3d28e1a5bf79 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -134,18 +134,6 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ *(((char*)ptr)+28),*(((char*)ptr)+29), \ *(((char*)ptr)+30),*(((char*)ptr)+31)); -static inline void 
iucv_hex_dump(unsigned char *buf, size_t len) -{ - size_t i; - - for (i = 0; i < len; i++) { - if (i && !(i % 16)) - printk("\n"); - printk("%02x ", *(buf + i)); - } - printk("\n"); -} - #define PRINTK_HEADER " iucv: " /* for debugging */ static struct device_driver netiucv_driver = { @@ -212,7 +200,7 @@ struct iucv_connection { */ static struct list_head iucv_connection_list = LIST_HEAD_INIT(iucv_connection_list); -static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED; +static DEFINE_RWLOCK(iucv_connection_rwlock); /** * Representation of event-data for the @@ -280,7 +268,7 @@ static u8 iucvMagic[16] = { * * @returns The printable string (static data!!) */ -static inline char *netiucv_printname(char *name) +static char *netiucv_printname(char *name) { static char tmp[9]; char *p = tmp; @@ -1315,7 +1303,8 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) * and throw away packet. */ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { - fsm_event(privptr->fsm, DEV_EVENT_START, dev); + if (!in_atomic()) + fsm_event(privptr->fsm, DEV_EVENT_START, dev); dev_kfree_skb(skb); privptr->stats.tx_dropped++; privptr->stats.tx_errors++; @@ -1729,7 +1718,7 @@ static struct attribute_group netiucv_stat_attr_group = { .attrs = netiucv_stat_attrs, }; -static inline int netiucv_add_files(struct device *dev) +static int netiucv_add_files(struct device *dev) { int ret; @@ -1743,7 +1732,7 @@ static inline int netiucv_add_files(struct device *dev) return ret; } -static inline void netiucv_remove_files(struct device *dev) +static void netiucv_remove_files(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __FUNCTION__); sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 4640f32daae5..70108fb16906 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -424,8 +424,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, /* prepare qdio hdr */ if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN + - eddp->nhl + eddp->thl - - sizeof(struct qeth_hdr); + eddp->nhl + eddp->thl; #ifdef CONFIG_QETH_VLAN if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) eddp->qh.hdr.l2.pkt_length += VLAN_HLEN; diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 0b96d49dd636..86b0c44165c1 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -986,15 +986,15 @@ qeth_recover(void *ptr) card->use_hard_stop = 1; __qeth_set_offline(card->gdev,1); rc = __qeth_set_online(card->gdev,1); + /* don't run another scheduled recovery */ + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); if (!rc) PRINT_INFO("Device %s successfully recovered!\n", CARD_BUS_ID(card)); else PRINT_INFO("Device %s could not be recovered!\n", CARD_BUS_ID(card)); - /* don't run another scheduled recovery */ - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; } @@ -2176,13 +2176,6 @@ qeth_ulp_enable(struct qeth_card *card) } -static inline __u16 -__raw_devno_from_bus_id(char *id) -{ - id += (strlen(id) - 4); - return (__u16) simple_strtoul(id, &id, 16); -} - static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2205,6 +2198,7 @@ qeth_ulp_setup(struct qeth_card *card) int rc; __u16 temp; struct qeth_cmd_buffer *iob; + struct ccw_dev_id dev_id; 
QETH_DBF_TEXT(setup,2,"ulpsetup"); @@ -2218,8 +2212,8 @@ qeth_ulp_setup(struct qeth_card *card) memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); - temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card)); - memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2); + ccw_device_get_id(CARD_DDEV(card), &dev_id); + memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); temp = (card->info.cula << 8) + card->info.unit_addr2; memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, @@ -5850,9 +5844,9 @@ qeth_add_vlan_mc6(struct qeth_card *card) in_dev = in6_dev_get(netdev); if (!in_dev) continue; - read_lock(&in_dev->lock); + read_lock_bh(&in_dev->lock); qeth_add_mc6(card,in_dev); - read_unlock(&in_dev->lock); + read_unlock_bh(&in_dev->lock); in6_dev_put(in_dev); } #endif /* CONFIG_QETH_VLAN */ @@ -5869,10 +5863,10 @@ qeth_add_multicast_ipv6(struct qeth_card *card) in6_dev = in6_dev_get(card->dev); if (in6_dev == NULL) return; - read_lock(&in6_dev->lock); + read_lock_bh(&in6_dev->lock); qeth_add_mc6(card, in6_dev); qeth_add_vlan_mc6(card); - read_unlock(&in6_dev->lock); + read_unlock_bh(&in6_dev->lock); in6_dev_put(in6_dev); } #endif /* CONFIG_QETH_IPV6 */ @@ -7476,11 +7470,11 @@ qeth_softsetup_card(struct qeth_card *card) QETH_DBF_TEXT_(setup, 2, "1err%d", rc); if (rc == 0xe080){ PRINT_WARN("LAN on card %s if offline! " - "Continuing softsetup.\n", + "Waiting for STARTLAN from card.\n", CARD_BUS_ID(card)); card->lan_online = 0; - } else - return rc; + } + return rc; } else card->lan_online = 1; if (card->info.type==QETH_CARD_TYPE_OSN) @@ -7797,15 +7791,17 @@ qeth_print_status_message(struct qeth_card *card) } /* fallthrough */ case QETH_CARD_TYPE_IQD: - card->info.mcl_level[0] = (char) _ebcasc[(__u8) - card->info.mcl_level[0]]; - card->info.mcl_level[1] = (char) _ebcasc[(__u8) - card->info.mcl_level[1]]; - card->info.mcl_level[2] = (char) _ebcasc[(__u8) - card->info.mcl_level[2]]; - card->info.mcl_level[3] = (char) _ebcasc[(__u8) - card->info.mcl_level[3]]; - card->info.mcl_level[QETH_MCL_LENGTH] = 0; + if (card->info.guestlan) { + card->info.mcl_level[0] = (char) _ebcasc[(__u8) + card->info.mcl_level[0]]; + card->info.mcl_level[1] = (char) _ebcasc[(__u8) + card->info.mcl_level[1]]; + card->info.mcl_level[2] = (char) _ebcasc[(__u8) + card->info.mcl_level[2]]; + card->info.mcl_level[3] = (char) _ebcasc[(__u8) + card->info.mcl_level[3]]; + card->info.mcl_level[QETH_MCL_LENGTH] = 0; + } break; default: memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 86fb671a8bcc..ed90403f0ee7 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -159,7 +159,7 @@ xfs_iozero( if (status) goto unlock; - memclear_highpage_flush(page, offset, bytes); + zero_user_page(page, offset, bytes, KM_USER0); status = mapping->a_ops->commit_write(NULL, page, offset, offset + bytes); diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h index 183eebeebbdc..f1d72d177f68 100644 --- a/include/asm-i386/dma-mapping.h +++ b/include/asm-i386/dma-mapping.h @@ -123,6 +123,8 @@ dma_mapping_error(dma_addr_t dma_addr) return 0; } +extern int forbid_dac; + static inline int dma_supported(struct device *dev, u64 mask) { @@ -134,6 +136,10 @@ dma_supported(struct device *dev, u64 mask) if(mask < 0x00ffffff) return 0; + /* Work around chipset bugs */ + if (forbid_dac > 0 && mask > 0xffffffffULL) + return 0; + return 1; } diff 
--git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 91803ba30ff2..3ca6a076124d 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h @@ -72,4 +72,13 @@ extern int allocate_irqno(void); extern void alloc_legacy_irqno(void); extern void free_irqno(unsigned int irq); +/* + * Before R2 the timer and performance counter interrupts were both fixed to + * IE7. Since R2 their number has to be read from the c0_intctl register. + */ +#define CP0_LEGACY_COMPARE_IRQ 7 + +extern int cp0_compare_irq; +extern int cp0_perfcount_irq; + #endif /* _ASM_IRQ_H */ diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h index 76add42e486e..93ba1c1b2a4f 100644 --- a/include/asm-mips/mips-boards/atlasint.h +++ b/include/asm-mips/mips-boards/atlasint.h @@ -28,11 +28,6 @@ #include <irq.h> -/* - * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode) - */ -#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE - /* CPU interrupt offsets */ #define MIPSCPU_INT_SW0 0 #define MIPSCPU_INT_SW1 1 @@ -42,7 +37,6 @@ #define MIPSCPU_INT_MB2 4 #define MIPSCPU_INT_MB3 5 #define MIPSCPU_INT_MB4 6 -#define MIPSCPU_INT_CPUCTR 7 /* * Interrupts 8..39 are used for Atlas interrupt controller interrupts diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h index 9180d6466113..7461318f1cd1 100644 --- a/include/asm-mips/mips-boards/maltaint.h +++ b/include/asm-mips/mips-boards/maltaint.h @@ -32,11 +32,6 @@ */ #define MALTA_INT_BASE 0 -/* - * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode) - */ -#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE - /* CPU interrupt offsets */ #define MIPSCPU_INT_SW0 0 #define MIPSCPU_INT_SW1 1 @@ -49,7 +44,6 @@ #define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3 #define MIPSCPU_INT_MB4 6 #define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4 -#define MIPSCPU_INT_CPUCTR 7 /* * Interrupts 64..127 are used for Soc-it Classic interrupts diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h index 4f6a3933699d..e710bae07340 100644 --- a/include/asm-mips/mips-boards/seadint.h +++ b/include/asm-mips/mips-boards/seadint.h @@ -22,14 +22,7 @@ #include <irq.h> -/* - * Interrupts 0..7 are used for SEAD CPU interrupts - */ -#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE - #define MIPSCPU_INT_UART0 2 #define MIPSCPU_INT_UART1 3 -#define MIPSCPU_INT_CPUCTR 7 - #endif /* !(_MIPS_SEADINT_H) */ diff --git a/include/asm-mips/mips-boards/simint.h b/include/asm-mips/mips-boards/simint.h index 54f2fe621d69..8ef6db76d5c1 100644 --- a/include/asm-mips/mips-boards/simint.h +++ b/include/asm-mips/mips-boards/simint.h @@ -21,15 +21,11 @@ #define SIM_INT_BASE 0 #define MIPSCPU_INT_MB0 2 -#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE #define MIPS_CPU_TIMER_IRQ 7 -#define MIPSCPU_INT_CPUCTR 7 - #define MSC01E_INT_BASE 64 -#define MIPSCPU_INT_CPUCTR 7 #define MSC01E_INT_CPUCTR 11 #endif diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 7e9afa720d43..21fbfc5afd02 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h @@ -188,7 +188,6 @@ static inline void set_eiem(unsigned long val) # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) #endif -#define KERNEL_START (0x10100000 - 0x1000) #define arch_align_stack(x) (x) #endif diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index e0fcea8c64c3..5cb480af65d5 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -216,6 +216,11 @@ static inline void 
cpu_relax(void) barrier(); } +static inline void psw_set_key(unsigned int key) +{ + asm volatile("spka 0(%0)" : : "d" (key)); +} + /* * Set PSW to specified value. */ diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index fa6ca87080e8..332ee73688fc 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -470,14 +470,7 @@ struct user_regs_struct #define regs_return_value(regs)((regs)->gprs[2]) #define profile_pc(regs) instruction_pointer(regs) extern void show_regs(struct pt_regs * regs); -#endif - -static inline void -psw_set_key(unsigned int key) -{ - asm volatile("spka 0(%0)" : : "d" (key)); -} - +#endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _S390_PTRACE_H */ diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index ae1ed05f2814..8696f8ad401e 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -626,9 +626,9 @@ __SYSCALL(__NR_utimensat, sys_utimensat) __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) #define __NR_signalfd 282 __SYSCALL(__NR_signalfd, sys_signalfd) -#define __NR_timerfd 282 +#define __NR_timerfd 283 __SYSCALL(__NR_timerfd, sys_timerfd) -#define __NR_eventfd 283 +#define __NR_eventfd 284 __SYSCALL(__NR_eventfd, sys_eventfd) #ifndef __NO_STUBS diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 4fb552d12f7a..7d1eaa97de13 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -54,6 +54,7 @@ enum { MLX4_CMD_INIT_PORT = 0x9, MLX4_CMD_CLOSE_PORT = 0xa, MLX4_CMD_QUERY_HCA = 0xb, + MLX4_CMD_QUERY_PORT = 0x43, MLX4_CMD_SET_PORT = 0xc, MLX4_CMD_ACCESS_DDR = 0x2e, MLX4_CMD_MAP_ICM = 0xffa, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 8c5f8fd86841..b372f5910fc1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -41,6 +41,7 @@ enum { MLX4_FLAG_MSI_X = 1 << 0, + MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, }; enum { @@ -131,10 +132,10 @@ enum { struct mlx4_caps { u64 fw_ver; int num_ports; - int vl_cap; - int mtu_cap; - int gid_table_len; - int pkey_table_len; + int vl_cap[MLX4_MAX_PORTS + 1]; + int mtu_cap[MLX4_MAX_PORTS + 1]; + int gid_table_len[MLX4_MAX_PORTS + 1]; + int pkey_table_len[MLX4_MAX_PORTS + 1]; int local_ca_ack_delay; int num_uars; int bf_reg_size; @@ -174,7 +175,7 @@ struct mlx4_caps { u32 page_size_cap; u32 flags; u16 stat_rate_support; - u8 port_width_cap; + u8 port_width_cap[MLX4_MAX_PORTS + 1]; }; struct mlx4_buf_list { @@ -322,7 +323,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); -int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port); +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 9eeb61adf6a3..10c57d279144 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -269,6 +269,10 @@ struct mlx4_wqe_data_seg { __be64 addr; }; +enum { + MLX4_INLINE_ALIGN = 64, +}; + struct mlx4_wqe_inline_seg { __be32 byte_count; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index e4183c6c7de3..1c1207472bb4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -603,6 +603,10 @@ static inline struct address_space *page_mapping(struct page *page) if 
(unlikely(PageSwapCache(page))) mapping = &swapper_space; +#ifdef CONFIG_SLUB + else if (unlikely(PageSlab(page))) + mapping = NULL; +#endif else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) mapping = NULL; return mapping; diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 588c99da0307..329ce0172074 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -353,9 +353,40 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) * it should be restarted. */ if (timr->it.real.interval.tv64 != 0) { + ktime_t now = hrtimer_cb_get_time(timer); + + /* + * FIXME: What we really want, is to stop this + * timer completely and restart it in case the + * SIG_IGN is removed. This is a non trivial + * change which involves sighand locking + * (sigh !), which we don't want to do late in + * the release cycle. + * + * For now we just let timers with an interval + * less than a jiffie expire every jiffie to + * avoid softirq starvation in case of SIG_IGN + * and a very small interval, which would put + * the timer right back on the softirq pending + * list. By moving now ahead of time we trick + * hrtimer_forward() to expire the timer + * later, while we still maintain the overrun + * accuracy, but have some inconsistency in + * the timer_gettime() case. This is at least + * better than a starved softirq. A more + * complex fix which solves also another related + * inconsistency is already in the pipeline. + */ +#ifdef CONFIG_HIGH_RES_TIMERS + { + ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); + + if (timr->it.real.interval.tv64 < kj.tv64) + now = ktime_add(now, kj); + } +#endif timr->it_overrun += - hrtimer_forward(timer, - hrtimer_cb_get_time(timer), + hrtimer_forward(timer, now, timr->it.real.interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; diff --git a/mm/mmap.c b/mm/mmap.c index 68b9ad2ef1d6..906ed402f7ca 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1536,9 +1536,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. + * Also guard against wrapping around to address 0. */ - address += 4 + PAGE_SIZE - 1; - address &= PAGE_MASK; + if (address < PAGE_ALIGN(address+4)) + address = PAGE_ALIGN(address+4); + else { + anon_vma_unlock(vma); + return -ENOMEM; + } error = 0; /* Somebody else might have raced and expanded it already */ diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c index 5a2bef44a2f5..7a22f0f3784a 100644 --- a/sound/ppc/pmac.c +++ b/sound/ppc/pmac.c @@ -775,7 +775,8 @@ static int snd_pmac_free(struct snd_pmac *chip) out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff); } - snd_pmac_sound_feature(chip, 0); + if (chip->node) + snd_pmac_sound_feature(chip, 0); /* clean up mixer if any */ if (chip->mixer_free) @@ -925,6 +926,7 @@ static int __init snd_pmac_detect(struct snd_pmac *chip) } if (! 
sound) { of_node_put(chip->node); + chip->node = NULL; return -ENODEV; } prop = of_get_property(sound, "sub-frame", NULL); @@ -937,7 +939,9 @@ static int __init snd_pmac_detect(struct snd_pmac *chip) printk(KERN_INFO "snd-powermac no longer handles any " "machines with a layout-id property " "in the device-tree, use snd-aoa.\n"); + of_node_put(sound); of_node_put(chip->node); + chip->node = NULL; return -ENODEV; } /* This should be verified on older screamers */ @@ -1297,8 +1301,6 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) return 0; __error: - if (chip->pdev) - pci_dev_put(chip->pdev); snd_pmac_free(chip); return err; } |
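
One technique worth pulling out of the mlx4_ib set_kernel_sq_size() change is how the send queue is sized: enough spare WQEs are reserved to cover the 2048 bytes of headroom the hardware may prefetch, the ring is rounded up to a power of two, and max_post is whatever is left after the spares. A minimal standalone sketch of that arithmetic (the strides and work-request count used here are illustrative, not values from the patch):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int max_send_wr = 100;			/* requested by the consumer */
	unsigned int wqe_shifts[] = { 6, 7, 8 };	/* 64-, 128-, 256-byte WQE strides */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int shift = wqe_shifts[i];
		/* spare WQEs covering 2048 bytes of prefetch headroom, plus one */
		unsigned int spare = (2048 >> shift) + 1;
		unsigned int cnt = roundup_pow_of_two(max_send_wr + spare);

		printf("stride %3u: %2u spare, wqe_cnt %3u, max_post %3u\n",
		       1u << shift, spare, cnt, cnt - spare);
	}
	return 0;
}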
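
The build_mlx_header() change handles UD headers that would cross a 64-byte boundary by splitting them over two inline segments, with a wmb() before the second segment's byte_count so the HCA prefetcher never sees a valid count next to stale data. A userspace sketch of the split arithmetic only (MLX4_INLINE_ALIGN and the 4-byte byte_count word come from the patch; the offsets passed in main() are illustrative):

#include <stdio.h>

#define MLX4_INLINE_ALIGN 64
#define ALIGN16(x) (((x) + 15) & ~15)

/*
 * data_off is the offset, within the WQE, of the first header byte
 * (i.e. just past the inline segment's 4-byte byte_count word).
 */
static void split_inline_header(unsigned int data_off, int header_size)
{
	int spc = MLX4_INLINE_ALIGN - (data_off & (MLX4_INLINE_ALIGN - 1));
	int segs;

	if (header_size <= spc) {
		segs = 1;
		printf("%3d-byte header at offset %2u: one segment\n",
		       header_size, data_off);
	} else {
		segs = 2;
		printf("%3d-byte header at offset %2u: split %d + %d, barrier before segment 2\n",
		       header_size, data_off, spc, header_size - spc);
	}
	/* mirrors the patch's return value: byte_count words plus data, 16-byte aligned */
	printf("    inline area: %d bytes\n", ALIGN16(segs * 4 + header_size));
}

int main(void)
{
	split_inline_header(20, 40);	/* fits below the next 64-byte boundary */
	split_inline_header(20, 72);	/* too big: needs two segments */
	return 0;
}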
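
In the cxgb3 xgmac.c changes, the PAUSE high-water mark is now derived from the MTU by rx_fifo_hwm(): leave room for three maximum-size frames, but never go below 38% of the RX FIFO or above the FIFO size minus 8KB. A standalone sketch of that clamp (MAC_RXFIFO_SIZE is assumed here to be a 32KB FIFO; the constant is not defined in the patch itself):

#include <stdio.h>

#define MAC_RXFIFO_SIZE 32768	/* assumed RX FIFO size in bytes */

static int rx_fifo_hwm(int mtu)
{
	int hwm = MAC_RXFIFO_SIZE - 3 * mtu;	/* room for ~3 max-size frames */

	if (hwm < (MAC_RXFIFO_SIZE * 38) / 100)	/* but at least 38% of the FIFO */
		hwm = (MAC_RXFIFO_SIZE * 38) / 100;
	if (hwm > MAC_RXFIFO_SIZE - 8192)	/* and at most FIFO size - 8KB */
		hwm = MAC_RXFIFO_SIZE - 8192;
	return hwm;
}

int main(void)
{
	int mtus[] = { 1500, 9000 };
	int i;

	for (i = 0; i < 2; i++)
		printf("MTU %4d -> PAUSE high-water mark %5d bytes\n",
		       mtus[i], rx_fifo_hwm(mtus[i]));
	return 0;
}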
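
The kernel/posix-timers.c hunk deals with SIG_IGN'd timers whose interval is shorter than a jiffie: rather than letting them re-expire immediately and starve the softirq, 'now' is nudged one jiffie ahead before hrtimer_forward() re-arms the timer. A simplified simulation of the effect (HZ is assumed to be 250 here, and forward() is a naive stand-in for hrtimer_forward()):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL
#define HZ 250				/* assumed; one jiffie = 4ms */

/* naive stand-in for hrtimer_forward(): push expiry past 'now' in whole intervals */
static long long forward(long long *expires, long long now, long long interval)
{
	long long overruns = 0;

	while (*expires <= now) {
		*expires += interval;
		overruns++;
	}
	return overruns;
}

int main(void)
{
	long long jiffie = NSEC_PER_SEC / HZ;
	long long interval = 100000;		/* 100us period, far below a jiffie */
	long long now = 10 * jiffie;
	long long e_old = now, e_new = now;

	/* without the clamp the timer is pending again almost immediately */
	forward(&e_old, now, interval);
	/* with the clamp 'now' is moved a jiffie ahead first */
	forward(&e_new, now + jiffie, interval);

	printf("without clamp: re-expires %lldus after now\n", (e_old - now) / 1000);
	printf("with clamp:    re-expires %lldus after now\n", (e_new - now) / 1000);
	return 0;
}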
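
Finally, the mm/mmap.c expand_upwards() fix is easiest to see with concrete numbers: the old code rounded address+4 up to a page boundary unconditionally, which wraps to a tiny address when the stack already sits at the top of the address space; the new comparison detects the wrap and fails with -ENOMEM instead. A small userspace illustration (64-bit unsigned long and 4KB pages are assumed):

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long address = ULONG_MAX - 2;	/* growing-up stack at the very top */

	/* old code: unconditional round-up silently wraps to a low address */
	unsigned long old = (address + 4 + PAGE_SIZE - 1) & PAGE_MASK;

	printf("old rounding: %#lx -> %#lx (wrapped)\n", address, old);

	/* new code: after a wrap, PAGE_ALIGN(address + 4) is smaller than
	 * address, so the expansion is refused instead */
	if (address < PAGE_ALIGN(address + 4))
		printf("new check:    expand to %#lx\n", PAGE_ALIGN(address + 4));
	else
		printf("new check:    refuse expansion (-ENOMEM)\n");
	return 0;
}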