698 files changed, 7263 insertions, 5050 deletions
diff --git a/.gitignore b/.gitignore index 8faa6c02b39e..5d56a3fd0de6 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ modules.builtin *.gz *.bz2 *.lzma +*.xz *.lzo *.patch *.gcno diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl index 5e87ad58c0b5..f51f28531b8d 100644 --- a/Documentation/DocBook/filesystems.tmpl +++ b/Documentation/DocBook/filesystems.tmpl @@ -82,6 +82,11 @@ </sect1> </chapter> + <chapter id="fs_events"> + <title>Events based on file descriptors</title> +!Efs/eventfd.c + </chapter> + <chapter id="sysfs"> <title>The Filesystem for Exporting Kernel Objects</title> !Efs/sysfs/file.c diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42 index 0e76ef12e4c6..a22ecf48f255 100644 --- a/Documentation/hwmon/jc42 +++ b/Documentation/hwmon/jc42 @@ -51,7 +51,8 @@ Supported chips: * JEDEC JC 42.4 compliant temperature sensor chips Prefix: 'jc42' Addresses scanned: I2C 0x18 - 0x1f - Datasheet: - + Datasheet: + http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf Author: Guenter Roeck <guenter.roeck@ericsson.com> @@ -60,7 +61,11 @@ Author: Description ----------- -This driver implements support for JEDEC JC 42.4 compliant temperature sensors. +This driver implements support for JEDEC JC 42.4 compliant temperature sensors, +which are used on many DDR3 memory modules for mobile devices and servers. Some +systems use the sensor to prevent memory overheating by automatically throttling +the memory controller. + The driver auto-detects the chips listed above, but can be manually instantiated to support other JC 42.4 compliant chips. @@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis, which applies to all limits. This register can be written by writing into temp1_crit_hyst. Other hysteresis attributes are read-only. +If the BIOS has configured the sensor for automatic temperature management, it +is likely that it has locked the registers, i.e., that the temperature limits +cannot be changed. 
+ Sysfs entries ------------- temp1_input Temperature (RO) -temp1_min Minimum temperature (RW) -temp1_max Maximum temperature (RW) -temp1_crit Critical high temperature (RW) +temp1_min Minimum temperature (RO or RW) +temp1_max Maximum temperature (RO or RW) +temp1_crit Critical high temperature (RO or RW) -temp1_crit_hyst Critical hysteresis temperature (RW) +temp1_crit_hyst Critical hysteresis temperature (RO or RW) temp1_max_hyst Maximum hysteresis temperature (RO) temp1_min_alarm Temperature low alarm diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp index 6526eee525a6..d2b56a4fd1f5 100644 --- a/Documentation/hwmon/k10temp +++ b/Documentation/hwmon/k10temp @@ -9,6 +9,8 @@ Supported chips: Socket S1G3: Athlon II, Sempron, Turion II * AMD Family 11h processors: Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) +* AMD Family 12h processors: "Llano" +* AMD Family 14h processors: "Brazos" (C/E/G-Series) Prefix: 'k10temp' Addresses scanned: PCI space @@ -17,10 +19,14 @@ Supported chips: http://support.amd.com/us/Processor_TechDocs/31116.pdf BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors: http://support.amd.com/us/Processor_TechDocs/41256.pdf + BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors: + http://support.amd.com/us/Processor_TechDocs/43170.pdf Revision Guide for AMD Family 10h Processors: http://support.amd.com/us/Processor_TechDocs/41322.pdf Revision Guide for AMD Family 11h Processors: http://support.amd.com/us/Processor_TechDocs/41788.pdf + Revision Guide for AMD Family 14h Models 00h-0Fh Processors: + http://support.amd.com/us/Processor_TechDocs/47534.pdf AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks: http://support.amd.com/us/Processor_TechDocs/43373.pdf AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet: @@ -34,7 +40,7 @@ Description ----------- This driver permits reading of the internal temperature sensor of AMD -Family 10h and 11h processors. +Family 10h/11h/12h/14h processors. All these processors have a sensor, but on those for Socket F or AM2+, the sensor may return inconsistent values (erratum 319). The driver diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 89835a4766a6..f4a04c0c7edc 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture and is between 256 and 4096 characters. It is defined in the file ./include/asm/setup.h as COMMAND_LINE_SIZE. +Finally, the [KMG] suffix is commonly described after a number of kernel +parameter values. These 'K', 'M', and 'G' letters represent the _binary_ +multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30 +bytes respectively. Such letter suffixes can also be entirely omitted. + acpi= [HW,ACPI,X86] Advanced Configuration and Power Interface @@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file Format: <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>] - crashkernel=nn[KMG]@ss[KMG] - [KNL] Reserve a chunk of physical memory to - hold a kernel to switch to with kexec on panic. + crashkernel=size[KMG][@offset[KMG]] + [KNL] Using kexec, Linux can switch to a 'crash kernel' + upon panic. This parameter reserves the physical + memory region [offset, offset + size] for that kernel + image. If '@offset' is omitted, then a suitable offset + is selected automatically. 
Check + Documentation/kdump/kdump.txt for further details. crashkernel=range1:size1[,range2:size2,...][@offset] [KNL] Same as above, but depends on the memory in the running system. The syntax of range is start-[end] where start and end are both a memory unit (amount[KMG]). See also - Documentation/kdump/kdump.txt for a example. + Documentation/kdump/kdump.txt for an example. cs89x0_dma= [HW,NET] Format: <dma> @@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file 6 (KERN_INFO) informational 7 (KERN_DEBUG) debug-level messages - log_buf_len=n Sets the size of the printk ring buffer, in bytes. - Format: { n | nk | nM } - n must be a power of two. The default size - is set in the kernel config file. + log_buf_len=n[KMG] Sets the size of the printk ring buffer, + in bytes. n must be a power of two. The default + size is set in the kernel config file. logo.nologo [FB] Disables display of the built-in Linux logo. This may be used to provide more screen space for diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index fe5c099b8fc8..4edd78dfb362 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -40,8 +40,6 @@ decnet.txt - info on using the DECnet networking layer in Linux. depca.txt - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver -dgrs.txt - - the Digi International RightSwitch SE-X Ethernet driver dmfe.txt - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. e100.txt @@ -50,8 +48,6 @@ e1000.txt - info on Intel's E1000 line of gigabit ethernet boards eql.txt - serial IP load balancing -ethertap.txt - - the Ethertap user space packet reception and transmission driver ewrk3.txt - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver filter.txt @@ -104,8 +100,6 @@ tuntap.txt - TUN/TAP device driver, allowing user space Rx/Tx of packets. vortex.txt - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. -wavelan.txt - - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver x25.txt - general info on X.25 development. x25-iface.txt diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile index 5aba7a33aeeb..24c308dd3fd1 100644 --- a/Documentation/networking/Makefile +++ b/Documentation/networking/Makefile @@ -4,6 +4,8 @@ obj- := dummy.o # List of programs to build hostprogs-y := ifenslave +HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include + # Tell kbuild to always build the programs always := $(hostprogs-y) diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt index aefd1e681804..04ca06325b08 100644 --- a/Documentation/networking/dns_resolver.txt +++ b/Documentation/networking/dns_resolver.txt @@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken. create dns_resolver foo:* * /usr/sbin/dns.foo %k - ===== USAGE ===== @@ -104,6 +103,14 @@ implemented in the module can be called after doing: returned also. +=============================== +READING DNS KEYS FROM USERSPACE +=============================== + +Keys of dns_resolver type can be read from userspace using keyctl_read() or +"keyctl read/print/pipe". + + ========= MECHANISM ========= diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index 996a27d9b8db..01c513fac40e 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt @@ -190,9 +190,9 @@ resources, scheduled and executed. 
* Long running CPU intensive workloads which can be better managed by the system scheduler. - WQ_FREEZEABLE + WQ_FREEZABLE - A freezeable wq participates in the freeze phase of the system + A freezable wq participates in the freeze phase of the system suspend operations. Work items on the wq are drained and no new work item starts execution until thawed. diff --git a/MAINTAINERS b/MAINTAINERS index 5dd6c751e6a6..f1bc3dc6b369 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -885,7 +885,7 @@ S: Supported ARM/QUALCOMM MSM MACHINE SUPPORT M: David Brown <davidb@codeaurora.org> -M: Daniel Walker <dwalker@codeaurora.org> +M: Daniel Walker <dwalker@fifo99.com> M: Bryan Huntsman <bryanh@codeaurora.org> L: linux-arm-msm@vger.kernel.org F: arch/arm/mach-msm/ @@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-s5p*/ +ARM/SAMSUNG MOBILE MACHINE SUPPORT +M: Kyungmin Park <kyungmin.park@samsung.com> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-s5pv210/mach-aquila.c +F: arch/arm/mach-s5pv210/mach-goni.c +F: arch/arm/mach-exynos4/mach-universal_c210.c +F: arch/arm/mach-exynos4/mach-nuri.c + ARM/SAMSUNG S5P SERIES FIMC SUPPORT M: Kyungmin Park <kyungmin.park@samsung.com> M: Sylwester Nawrocki <s.nawrocki@samsung.com> @@ -1467,6 +1476,7 @@ F: include/net/bluetooth/ BONDING DRIVER M: Jay Vosburgh <fubar@us.ibm.com> +M: Andy Gospodarek <andy@greyhouse.net> L: netdev@vger.kernel.org W: http://sourceforge.net/projects/bonding/ S: Supported @@ -1692,6 +1702,13 @@ M: Andy Whitcroft <apw@canonical.com> S: Supported F: scripts/checkpatch.pl +CHINESE DOCUMENTATION +M: Harry Wei <harryxiyou@gmail.com> +L: xiyoulinuxkernelgroup@googlegroups.com +L: linux-kernel@zh-kernel.org (moderated for non-subscribers) +S: Maintained +F: Documentation/zh_CN/ + CISCO VIC ETHERNET NIC DRIVER M: Vasanthy Kolluri <vkolluri@cisco.com> M: Roopa Prabhu <roprabhu@cisco.com> @@ -2026,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt F: drivers/scsi/dc395x.* DCCP PROTOCOL -M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> +M: Gerrit Renker <gerrit@erg.abdn.ac.uk> L: dccp@vger.kernel.org W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp S: Maintained @@ -2873,7 +2890,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com> L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ -T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git S: Maintained F: Documentation/hwmon/ @@ -3513,7 +3529,7 @@ F: drivers/hwmon/jc42.c F: Documentation/hwmon/jc42 JFS FILESYSTEM -M: Dave Kleikamp <shaggy@linux.vnet.ibm.com> +M: Dave Kleikamp <shaggy@kernel.org> L: jfs-discussion@lists.sourceforge.net W: http://jfs.sourceforge.net/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git @@ -4276,10 +4292,7 @@ S: Maintained F: net/sched/sch_netem.c NETERION 10GbE DRIVERS (s2io/vxge) -M: Ramkrishna Vepa <ramkrishna.vepa@exar.com> -M: Sivakumar Subramani <sivakumar.subramani@exar.com> -M: Sreenivasa Honnur <sreenivasa.honnur@exar.com> -M: Jon Mason <jon.mason@exar.com> +M: Jon Mason <jdmason@kudzu.us> L: netdev@vger.kernel.org W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous @@ -5165,6 +5178,7 @@ F: drivers/char/random.c RAPIDIO SUBSYSTEM M: Matt Porter 
<mporter@kernel.crashing.org> +M: Alexandre Bounine <alexandre.bounine@idt.com> S: Maintained F: drivers/rapidio/ @@ -5267,7 +5281,7 @@ S: Maintained F: drivers/net/wireless/rtl818x/rtl8180/ RTL8187 WIRELESS DRIVER -M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> +M: Herton Ronaldo Krzesinski <herton@canonical.com> M: Hin-Tak Leung <htl10@users.sourceforge.net> M: Larry Finger <Larry.Finger@lwfinger.net> L: linux-wireless@vger.kernel.org @@ -6105,7 +6119,7 @@ S: Maintained F: security/tomoyo/ TOPSTAR LAPTOP EXTRAS DRIVER -M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> +M: Herton Ronaldo Krzesinski <herton@canonical.com> L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/topstar-laptop.c @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 38 -EXTRAVERSION = -rc5 +EXTRAVERSION = NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 47f63d480141..cc31bec2e316 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -11,6 +11,7 @@ config ALPHA select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select AUTO_IRQ_AFFINITY if SMP + select GENERIC_HARDIRQS_NO_DEPRECATED help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 9ab234f48dd8..a19d60082299 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS]; int irq_select_affinity(unsigned int irq) { - struct irq_desc *desc = irq_to_desc[irq]; + struct irq_data *data = irq_get_irq_data(irq); + struct irq_chip *chip; static int last_cpu; int cpu = last_cpu + 1; - if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) + if (!data) + return 1; + chip = irq_data_get_irq_chip(data); + + if (!chip->irq_set_affinity || irq_user_affinity[irq]) return 1; while (!cpu_possible(cpu) || @@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq) cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); last_cpu = cpu; - cpumask_copy(desc->affinity, cpumask_of(cpu)); - get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); + cpumask_copy(data->affinity, cpumask_of(cpu)); + chip->irq_set_affinity(data, cpumask_of(cpu), false); return 0; } #endif /* CONFIG_SMP */ diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 2d0679b60939..411ca11d0a18 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -228,14 +228,9 @@ struct irqaction timer_irqaction = { void __init init_rtc_irq(void) { - struct irq_desc *desc = irq_to_desc(RTC_IRQ); - - if (desc) { - desc->status |= IRQ_DISABLED; - set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, - handle_simple_irq, "RTC"); - setup_irq(RTC_IRQ, &timer_irqaction); - } + set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, + handle_simple_irq, "RTC"); + setup_irq(RTC_IRQ, &timer_irqaction); } /* Dummy irqactions. 
*/ diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index 956ea0ed1694..c7cc9813e45f 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c @@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask) } inline void -i8259a_enable_irq(unsigned int irq) +i8259a_enable_irq(struct irq_data *d) { spin_lock(&i8259_irq_lock); - i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); + i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); spin_unlock(&i8259_irq_lock); } @@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq) } void -i8259a_disable_irq(unsigned int irq) +i8259a_disable_irq(struct irq_data *d) { spin_lock(&i8259_irq_lock); - __i8259a_disable_irq(irq); + __i8259a_disable_irq(d->irq); spin_unlock(&i8259_irq_lock); } void -i8259a_mask_and_ack_irq(unsigned int irq) +i8259a_mask_and_ack_irq(struct irq_data *d) { + unsigned int irq = d->irq; + spin_lock(&i8259_irq_lock); __i8259a_disable_irq(irq); @@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq) struct irq_chip i8259a_irq_type = { .name = "XT-PIC", - .unmask = i8259a_enable_irq, - .mask = i8259a_disable_irq, - .mask_ack = i8259a_mask_and_ack_irq, + .irq_unmask = i8259a_enable_irq, + .irq_mask = i8259a_disable_irq, + .irq_mask_ack = i8259a_mask_and_ack_irq, }; void __init diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h index b63ccd7386f1..d507a234b05d 100644 --- a/arch/alpha/kernel/irq_impl.h +++ b/arch/alpha/kernel/irq_impl.h @@ -31,11 +31,9 @@ extern void init_rtc_irq(void); extern void common_init_isa_dma(void); -extern void i8259a_enable_irq(unsigned int); -extern void i8259a_disable_irq(unsigned int); -extern void i8259a_mask_and_ack_irq(unsigned int); -extern unsigned int i8259a_startup_irq(unsigned int); -extern void i8259a_end_irq(unsigned int); +extern void i8259a_enable_irq(struct irq_data *d); +extern void i8259a_disable_irq(struct irq_data *d); +extern void i8259a_mask_and_ack_irq(struct irq_data *d); extern struct irq_chip i8259a_irq_type; extern void init_i8259a_irqs(void); diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index 2863458c853e..b30227fa7f5f 100644 --- a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c @@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask) } static inline void -pyxis_enable_irq(unsigned int irq) +pyxis_enable_irq(struct irq_data *d) { - pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); + pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void -pyxis_disable_irq(unsigned int irq) +pyxis_disable_irq(struct irq_data *d) { - pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); + pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static void -pyxis_mask_and_ack_irq(unsigned int irq) +pyxis_mask_and_ack_irq(struct irq_data *d) { - unsigned long bit = 1UL << (irq - 16); + unsigned long bit = 1UL << (d->irq - 16); unsigned long mask = cached_irq_mask &= ~bit; /* Disable the interrupt. 
*/ @@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq) static struct irq_chip pyxis_irq_type = { .name = "PYXIS", - .mask_ack = pyxis_mask_and_ack_irq, - .mask = pyxis_disable_irq, - .unmask = pyxis_enable_irq, + .irq_mask_ack = pyxis_mask_and_ack_irq, + .irq_mask = pyxis_disable_irq, + .irq_unmask = pyxis_enable_irq, }; void @@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask) if ((ignore_mask >> i) & 1) continue; set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_set_status_flags(i, IRQ_LEVEL); } setup_irq(16+7, &isa_cascade_irqaction); diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c index 0e57e828b413..82a47bba41c4 100644 --- a/arch/alpha/kernel/irq_srm.c +++ b/arch/alpha/kernel/irq_srm.c @@ -18,27 +18,27 @@ DEFINE_SPINLOCK(srm_irq_lock); static inline void -srm_enable_irq(unsigned int irq) +srm_enable_irq(struct irq_data *d) { spin_lock(&srm_irq_lock); - cserve_ena(irq - 16); + cserve_ena(d->irq - 16); spin_unlock(&srm_irq_lock); } static void -srm_disable_irq(unsigned int irq) +srm_disable_irq(struct irq_data *d) { spin_lock(&srm_irq_lock); - cserve_dis(irq - 16); + cserve_dis(d->irq - 16); spin_unlock(&srm_irq_lock); } /* Handle interrupts from the SRM, assuming no additional weirdness. */ static struct irq_chip srm_irq_type = { .name = "SRM", - .unmask = srm_enable_irq, - .mask = srm_disable_irq, - .mask_ack = srm_disable_irq, + .irq_unmask = srm_enable_irq, + .irq_mask = srm_disable_irq, + .irq_mask_ack = srm_disable_irq, }; void __init @@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask) if (i < 64 && ((ignore_mask >> i) & 1)) continue; set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_set_status_flags(i, IRQ_LEVEL); } } diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 7bef61768236..88d95e872f55 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask) } static inline void -alcor_enable_irq(unsigned int irq) +alcor_enable_irq(struct irq_data *d) { - alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); + alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void -alcor_disable_irq(unsigned int irq) +alcor_disable_irq(struct irq_data *d) { - alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); + alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static void -alcor_mask_and_ack_irq(unsigned int irq) +alcor_mask_and_ack_irq(struct irq_data *d) { - alcor_disable_irq(irq); + alcor_disable_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ - *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); + *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); *(vuip)GRU_INT_CLEAR = 0; mb(); } static void -alcor_isa_mask_and_ack_irq(unsigned int irq) +alcor_isa_mask_and_ack_irq(struct irq_data *d) { - i8259a_mask_and_ack_irq(irq); + i8259a_mask_and_ack_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. 
*/ *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); @@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq) static struct irq_chip alcor_irq_type = { .name = "ALCOR", - .unmask = alcor_enable_irq, - .mask = alcor_disable_irq, - .mask_ack = alcor_mask_and_ack_irq, + .irq_unmask = alcor_enable_irq, + .irq_mask = alcor_disable_irq, + .irq_mask_ack = alcor_mask_and_ack_irq, }; static void @@ -126,9 +126,9 @@ alcor_init_irq(void) if (i >= 16+20 && i <= 16+30) continue; set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_set_status_flags(i, IRQ_LEVEL); } - i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; + i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; init_i8259a_irqs(); common_init_isa_dma(); diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index b0c916493aea..57eb6307bc27 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask) } static inline void -cabriolet_enable_irq(unsigned int irq) +cabriolet_enable_irq(struct irq_data *d) { - cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); + cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq)); } static void -cabriolet_disable_irq(unsigned int irq) +cabriolet_disable_irq(struct irq_data *d) { - cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); + cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq); } static struct irq_chip cabriolet_irq_type = { .name = "CABRIOLET", - .unmask = cabriolet_enable_irq, - .mask = cabriolet_disable_irq, - .mask_ack = cabriolet_disable_irq, + .irq_unmask = cabriolet_enable_irq, + .irq_mask = cabriolet_disable_irq, + .irq_mask_ack = cabriolet_disable_irq, }; static void @@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v)) for (i = 16; i < 35; ++i) { set_irq_chip_and_handler(i, &cabriolet_irq_type, handle_level_irq); - irq_to_desc(i)->status |= IRQ_LEVEL; + irq_set_status_flags(i, IRQ_LEVEL); } } diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index edad5f759ccd..481df4ecb651 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask) } static void -dp264_enable_irq(unsigned int irq) +dp264_enable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); - cached_irq_mask |= 1UL << irq; + cached_irq_mask |= 1UL << d->irq; tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void -dp264_disable_irq(unsigned int irq) +dp264_disable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); - cached_irq_mask &= ~(1UL << irq); + cached_irq_mask &= ~(1UL << d->irq); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void -clipper_enable_irq(unsigned int irq) +clipper_enable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); - cached_irq_mask |= 1UL << (irq - 16); + cached_irq_mask |= 1UL << (d->irq - 16); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void -clipper_disable_irq(unsigned int irq) +clipper_disable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); - cached_irq_mask &= ~(1UL << (irq - 16)); + cached_irq_mask &= ~(1UL << (d->irq - 16)); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } @@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } static int -dp264_set_affinity(unsigned int irq, const struct 
cpumask *affinity) -{ +dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, + bool force) +{ spin_lock(&dp264_irq_lock); - cpu_set_irq_affinity(irq, *affinity); + cpu_set_irq_affinity(d->irq, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); @@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) } static int -clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) -{ +clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, + bool force) +{ spin_lock(&dp264_irq_lock); - cpu_set_irq_affinity(irq - 16, *affinity); + cpu_set_irq_affinity(d->irq - 16, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); @@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) } static struct irq_chip dp264_irq_type = { - .name = "DP264", - .unmask = dp264_enable_irq, - .mask = dp264_disable_irq, - .mask_ack = dp264_disable_irq, - .set_affinity = dp264_set_affinity, + .name = "DP264", + .irq_unmask = dp264_enable_irq, + .irq_mask = dp264_disable_irq, + .irq_mask_ack = dp264_disable_irq, + .irq_set_affinity = dp264_set_affinity, }; static struct irq_chip clipper_irq_type = { - .name = "CLIPPER", - .unmask = clipper_enable_irq, - .mask = clipper_disable_irq, - .mask_ack = clipper_disable_irq, - .set_affinity = clipper_set_affinity, + .name = "CLIPPER", + .irq_unmask = clipper_enable_irq, + .irq_mask = clipper_disable_irq, + .irq_mask_ack = clipper_disable_irq, + .irq_set_affinity = clipper_set_affinity, }; static void @@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, ops, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } } diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index ae5f29d127b0..402e908ffb3e 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c @@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask) } static inline void -eb64p_enable_irq(unsigned int irq) +eb64p_enable_irq(struct irq_data *d) { - eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); + eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); } static void -eb64p_disable_irq(unsigned int irq) +eb64p_disable_irq(struct irq_data *d) { - eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); + eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); } static struct irq_chip eb64p_irq_type = { .name = "EB64P", - .unmask = eb64p_enable_irq, - .mask = eb64p_disable_irq, - .mask_ack = eb64p_disable_irq, + .irq_unmask = eb64p_enable_irq, + .irq_mask = eb64p_disable_irq, + .irq_mask_ack = eb64p_disable_irq, }; static void @@ -118,9 +118,9 @@ eb64p_init_irq(void) init_i8259a_irqs(); for (i = 16; i < 32; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); - } + irq_set_status_flags(i, IRQ_LEVEL); + } common_init_isa_dma(); setup_irq(16+5, &isa_cascade_irqaction); diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 1121bc5c6c6c..0b44a54c1522 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c @@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask) } static inline void -eiger_enable_irq(unsigned int irq) +eiger_enable_irq(struct irq_data *d) { + unsigned int irq = d->irq; unsigned long mask; mask = 
(cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); eiger_update_irq_hw(irq, mask); } static void -eiger_disable_irq(unsigned int irq) +eiger_disable_irq(struct irq_data *d) { + unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); eiger_update_irq_hw(irq, mask); @@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq) static struct irq_chip eiger_irq_type = { .name = "EIGER", - .unmask = eiger_enable_irq, - .mask = eiger_disable_irq, - .mask_ack = eiger_disable_irq, + .irq_unmask = eiger_enable_irq, + .irq_mask = eiger_disable_irq, + .irq_mask_ack = eiger_disable_irq, }; static void @@ -136,8 +138,8 @@ eiger_init_irq(void) init_i8259a_irqs(); for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } } diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 34f55e03d331..00341b75c8b2 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -63,34 +63,34 @@ */ static void -jensen_local_enable(unsigned int irq) +jensen_local_enable(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ - if (irq == 7) - i8259a_enable_irq(1); + if (d->irq == 7) + i8259a_enable_irq(d); } static void -jensen_local_disable(unsigned int irq) +jensen_local_disable(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ - if (irq == 7) - i8259a_disable_irq(1); + if (d->irq == 7) + i8259a_disable_irq(d); } static void -jensen_local_mask_ack(unsigned int irq) +jensen_local_mask_ack(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ - if (irq == 7) - i8259a_mask_and_ack_irq(1); + if (d->irq == 7) + i8259a_mask_and_ack_irq(d); } static struct irq_chip jensen_local_irq_type = { .name = "LOCAL", - .unmask = jensen_local_enable, - .mask = jensen_local_disable, - .mask_ack = jensen_local_mask_ack, + .irq_unmask = jensen_local_enable, + .irq_mask = jensen_local_disable, + .irq_mask_ack = jensen_local_mask_ack, }; static void diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 2bfc9f1b1ddc..e61910734e41 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7) } static void -io7_enable_irq(unsigned int irq) +io7_enable_irq(struct irq_data *d) { volatile unsigned long *ctl; + unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); @@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq) __func__, irq); return; } - + spin_lock(&io7->irq_lock); *ctl |= 1UL << 24; mb(); @@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq) } static void -io7_disable_irq(unsigned int irq) +io7_disable_irq(struct irq_data *d) { volatile unsigned long *ctl; + unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); @@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq) __func__, irq); return; } - + spin_lock(&io7->irq_lock); *ctl &= ~(1UL << 24); mb(); @@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq) } static void -marvel_irq_noop(unsigned int irq) -{ - return; -} - -static unsigned int -marvel_irq_noop_return(unsigned int irq) -{ - return 0; +marvel_irq_noop(struct irq_data *d) +{ + return; } static struct irq_chip marvel_legacy_irq_type = { .name = "LEGACY", - .mask = marvel_irq_noop, - .unmask = marvel_irq_noop, + .irq_mask = marvel_irq_noop, + .irq_unmask = marvel_irq_noop, }; static struct irq_chip 
io7_lsi_irq_type = { .name = "LSI", - .unmask = io7_enable_irq, - .mask = io7_disable_irq, - .mask_ack = io7_disable_irq, + .irq_unmask = io7_enable_irq, + .irq_mask = io7_disable_irq, + .irq_mask_ack = io7_disable_irq, }; static struct irq_chip io7_msi_irq_type = { .name = "MSI", - .unmask = io7_enable_irq, - .mask = io7_disable_irq, - .ack = marvel_irq_noop, + .irq_unmask = io7_enable_irq, + .irq_mask = io7_disable_irq, + .irq_ack = marvel_irq_noop, }; static void @@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7, /* Set up the lsi irqs. */ for (i = 0; i < 128; ++i) { - irq_to_desc(base + i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } /* Disable the implemented irqs in hardware. */ @@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7, /* Set up the msi irqs. */ for (i = 128; i < (128 + 512); ++i) { - irq_to_desc(base + i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } for (i = 0; i < 16; ++i) diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index bcc1639e8efb..cf7f43dd3147 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c @@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask) } static inline void -mikasa_enable_irq(unsigned int irq) +mikasa_enable_irq(struct irq_data *d) { - mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); + mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16)); } static void -mikasa_disable_irq(unsigned int irq) +mikasa_disable_irq(struct irq_data *d) { - mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); + mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16))); } static struct irq_chip mikasa_irq_type = { .name = "MIKASA", - .unmask = mikasa_enable_irq, - .mask = mikasa_disable_irq, - .mask_ack = mikasa_disable_irq, + .irq_unmask = mikasa_enable_irq, + .irq_mask = mikasa_disable_irq, + .irq_mask_ack = mikasa_disable_irq, }; static void @@ -98,8 +98,8 @@ mikasa_init_irq(void) mikasa_update_irq_hw(0); for (i = 16; i < 32; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index e88f4ae1260e..92bc188e94a9 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c @@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask) } static void -noritake_enable_irq(unsigned int irq) +noritake_enable_irq(struct irq_data *d) { - noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); + noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16)); } static void -noritake_disable_irq(unsigned int irq) +noritake_disable_irq(struct irq_data *d) { - noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); + noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16))); } static struct irq_chip noritake_irq_type = { .name = "NORITAKE", - .unmask = noritake_enable_irq, - .mask = noritake_disable_irq, - .mask_ack = noritake_disable_irq, + .irq_unmask = noritake_enable_irq, + .irq_mask = noritake_disable_irq, + .irq_mask_ack = noritake_disable_irq, }; static void @@ -127,8 +127,8 @@ noritake_init_irq(void) outw(0, 0x54c); for (i = 16; i < 48; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); + irq_set_status_flags(i, 
IRQ_LEVEL); } init_i8259a_irqs(); diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index 6a51364dd1cc..936d4140ed5f 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c @@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask) (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) static inline void -rawhide_enable_irq(unsigned int irq) +rawhide_enable_irq(struct irq_data *d) { unsigned int mask, hose; + unsigned int irq = d->irq; irq -= 16; hose = irq / 24; @@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq) } static void -rawhide_disable_irq(unsigned int irq) +rawhide_disable_irq(struct irq_data *d) { unsigned int mask, hose; + unsigned int irq = d->irq; irq -= 16; hose = irq / 24; @@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq) } static void -rawhide_mask_and_ack_irq(unsigned int irq) +rawhide_mask_and_ack_irq(struct irq_data *d) { unsigned int mask, mask1, hose; + unsigned int irq = d->irq; irq -= 16; hose = irq / 24; @@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq) static struct irq_chip rawhide_irq_type = { .name = "RAWHIDE", - .unmask = rawhide_enable_irq, - .mask = rawhide_disable_irq, - .mask_ack = rawhide_mask_and_ack_irq, + .irq_unmask = rawhide_enable_irq, + .irq_mask = rawhide_disable_irq, + .irq_mask_ack = rawhide_mask_and_ack_irq, }; static void @@ -177,8 +180,8 @@ rawhide_init_irq(void) } for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index 89e7e37ec84c..cea22a62913b 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c @@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask) } static inline void -rx164_enable_irq(unsigned int irq) +rx164_enable_irq(struct irq_data *d) { - rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); + rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void -rx164_disable_irq(unsigned int irq) +rx164_disable_irq(struct irq_data *d) { - rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); + rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static struct irq_chip rx164_irq_type = { .name = "RX164", - .unmask = rx164_enable_irq, - .mask = rx164_disable_irq, - .mask_ack = rx164_disable_irq, + .irq_unmask = rx164_enable_irq, + .irq_mask = rx164_disable_irq, + .irq_mask_ack = rx164_disable_irq, }; static void @@ -99,8 +99,8 @@ rx164_init_irq(void) rx164_update_irq_hw(0); for (i = 16; i < 40; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 5c4423d1b06c..a349538aabc9 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp) /* GENERIC irq routines */ static inline void -sable_lynx_enable_irq(unsigned int irq) +sable_lynx_enable_irq(struct irq_data *d) { unsigned long bit, mask; - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); sable_lynx_irq_swizzle->update_irq_hw(bit, mask); @@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq) } 
static void -sable_lynx_disable_irq(unsigned int irq) +sable_lynx_disable_irq(struct irq_data *d) { unsigned long bit, mask; - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); @@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq) } static void -sable_lynx_mask_and_ack_irq(unsigned int irq) +sable_lynx_mask_and_ack_irq(struct irq_data *d) { unsigned long bit, mask; - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); @@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq) static struct irq_chip sable_lynx_irq_type = { .name = "SABLE/LYNX", - .unmask = sable_lynx_enable_irq, - .mask = sable_lynx_disable_irq, - .mask_ack = sable_lynx_mask_and_ack_irq, + .irq_unmask = sable_lynx_enable_irq, + .irq_mask = sable_lynx_disable_irq, + .irq_mask_ack = sable_lynx_mask_and_ack_irq, }; static void @@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs) long i; for (i = 0; i < nr_of_irqs; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &sable_lynx_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index f8a1e8a862fb..42a5331f13c4 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c @@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask) } static inline void -takara_enable_irq(unsigned int irq) +takara_enable_irq(struct irq_data *d) { + unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); takara_update_irq_hw(irq, mask); } static void -takara_disable_irq(unsigned int irq) +takara_disable_irq(struct irq_data *d) { + unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); takara_update_irq_hw(irq, mask); @@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq) static struct irq_chip takara_irq_type = { .name = "TAKARA", - .unmask = takara_enable_irq, - .mask = takara_disable_irq, - .mask_ack = takara_disable_irq, + .irq_unmask = takara_enable_irq, + .irq_mask = takara_disable_irq, + .irq_mask_ack = takara_disable_irq, }; static void @@ -136,8 +138,8 @@ takara_init_irq(void) takara_update_irq_hw(i, -1); for (i = 16; i < 128; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index e02494bf5ef3..8c13a0c77830 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask) } static inline void -titan_enable_irq(unsigned int irq) +titan_enable_irq(struct irq_data *d) { + unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cached_irq_mask |= 1UL << (irq - 16); titan_update_irq_hw(titan_cached_irq_mask); @@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq) } static inline void -titan_disable_irq(unsigned int irq) +titan_disable_irq(struct irq_data *d) { + unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cached_irq_mask &= ~(1UL << (irq 
- 16)); titan_update_irq_hw(titan_cached_irq_mask); @@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } static int -titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) +titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, + bool force) { + unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cpu_set_irq_affinity(irq - 16, *affinity); titan_update_irq_hw(titan_cached_irq_mask); @@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { - irq_to_desc(i)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i, ops, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); } } static struct irq_chip titan_irq_type = { - .name = "TITAN", - .unmask = titan_enable_irq, - .mask = titan_disable_irq, - .mask_ack = titan_disable_irq, - .set_affinity = titan_set_irq_affinity, + .name = "TITAN", + .irq_unmask = titan_enable_irq, + .irq_mask = titan_disable_irq, + .irq_mask_ack = titan_disable_irq, + .irq_set_affinity = titan_set_irq_affinity, }; static irqreturn_t diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index eec52594d410..ca60a387ef0a 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -104,10 +104,12 @@ wildfire_init_irq_hw(void) } static void -wildfire_enable_irq(unsigned int irq) +wildfire_enable_irq(struct irq_data *d) { + unsigned int irq = d->irq; + if (irq < 16) - i8259a_enable_irq(irq); + i8259a_enable_irq(d); spin_lock(&wildfire_irq_lock); set_bit(irq, &cached_irq_mask); @@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq) } static void -wildfire_disable_irq(unsigned int irq) +wildfire_disable_irq(struct irq_data *d) { + unsigned int irq = d->irq; + if (irq < 16) - i8259a_disable_irq(irq); + i8259a_disable_irq(d); spin_lock(&wildfire_irq_lock); clear_bit(irq, &cached_irq_mask); @@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq) } static void -wildfire_mask_and_ack_irq(unsigned int irq) +wildfire_mask_and_ack_irq(struct irq_data *d) { + unsigned int irq = d->irq; + if (irq < 16) - i8259a_mask_and_ack_irq(irq); + i8259a_mask_and_ack_irq(d); spin_lock(&wildfire_irq_lock); clear_bit(irq, &cached_irq_mask); @@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq) static struct irq_chip wildfire_irq_type = { .name = "WILDFIRE", - .unmask = wildfire_enable_irq, - .mask = wildfire_disable_irq, - .mask_ack = wildfire_mask_and_ack_irq, + .irq_unmask = wildfire_enable_irq, + .irq_mask = wildfire_disable_irq, + .irq_mask_ack = wildfire_mask_and_ack_irq, }; static void __init @@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) for (i = 0; i < 16; ++i) { if (i == 2) continue; - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, handle_level_irq); + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); } - irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, handle_level_irq); + irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); for (i = 40; i < 64; ++i) { - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, handle_level_irq); + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); } - setup_irq(32+irq_bias, &isa_enable); + setup_irq(32+irq_bias, &isa_enable); } static void __init diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 26d45e5b636b..166efa2a19cd 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig 
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622 visible impact on the overall performance or power consumption of the processor. +config ARM_ERRATA_751472 + bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 751472 Cortex-A9 (prior + to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the + completion of a following broadcasted operation if the second + operation is received by a CPU before the ICIALLUIS has completed, + potentially leading to corrupted entries in the cache or TLB. + +config ARM_ERRATA_753970 + bool "ARM errata: cache sync operation may be faulty" + depends on CACHE_PL310 + help + This option enables the workaround for the 753970 PL310 (r3p0) erratum. + + Under some condition the effect of cache sync operation on + the store buffer still remains when the operation completes. + This means that the store buffer is always asked to drain and + this prevents it from merging any further writes. The workaround + is to replace the normal offset of cache sync operation (0x730) + by another offset targeting an unmapped PL310 register 0x740. + This has the same effect as the cache sync operation: store buffer + drain and waiting for all buffers empty. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1adfedd6..6f7b29294c80 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 endif -OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS :=-O binary -R .comment -S GZFLAGS :=-9 #KBUILD_CFLAGS +=-pipe # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index ab204db594d3..c6028967d336 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,3 +1,7 @@ font.c -piggy.gz +lib1funcs.S +piggy.gzip +piggy.lzo +piggy.lzma +vmlinux vmlinux.lds diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 778655f0257a..ea5ee4d067f3 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -6,6 +6,8 @@ config ARM_VIC config ARM_VIC_NR int + default 4 if ARCH_S5PV210 + default 3 if ARCH_S5P6442 || ARCH_S5PC100 default 2 depends on ARM_VIC help diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5aeec1e1735c..16bd48031583 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -36,6 +36,7 @@ #define L2X0_RAW_INTR_STAT 0x21C #define L2X0_INTR_CLEAR 0x220 #define L2X0_CACHE_SYNC 0x730 +#define L2X0_DUMMY_REG 0x740 #define L2X0_INV_LINE_PA 0x770 #define L2X0_INV_WAY 0x77C #define L2X0_CLEAN_LINE_PA 0x7B0 diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 721847dc68ab..e0d1c0cfa548 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -58,6 +58,9 @@ static inline void sysctl_soft_reset(void __iomem *base) { + /* switch to slow mode */ + writel(0x2, base + SCCTRL); + /* writing any value to SCSYSSTAT reg will reset system */ writel(0, base + SCSYSSTAT); } diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 3a0893a76a3b..bf13b814c1b8 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ 
-15,10 +15,6 @@ struct meminfo; struct sys_timer; struct machine_desc { - /* - * Note! The first two elements are used - * by assembler code in head.S, head-common.S - */ unsigned int nr; /* architecture number */ const char *name; /* architecture name */ unsigned long boot_params; /* tagged list */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 9763be04f77e..22de005f159c 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -10,6 +10,8 @@ #ifndef _ASMARM_PGALLOC_H #define _ASMARM_PGALLOC_H +#include <linux/pagemap.h> + #include <asm/domain.h> #include <asm/pgtable-hwdef.h> #include <asm/processor.h> diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd12..82dfe5d0c41e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -18,16 +18,34 @@ #define __ASMARM_TLB_H #include <asm/cacheflush.h> -#include <asm/tlbflush.h> #ifndef CONFIG_MMU #include <linux/pagemap.h> + +#define tlb_flush(tlb) ((void) tlb) + #include <asm-generic/tlb.h> #else /* !CONFIG_MMU */ +#include <linux/swap.h> #include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +/* + * We need to delay page freeing for SMP as other CPUs can access pages + * which have been removed but not yet had their TLB entries invalidated. + * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, + * we need to apply this same delaying tactic to ensure correct operation. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) +#define tlb_fast_mode(tlb) 0 +#define FREE_PTE_NR 500 +#else +#define tlb_fast_mode(tlb) 1 +#define FREE_PTE_NR 0 +#endif /* * TLB handling. This allows us to remove pages from the page @@ -36,12 +54,58 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; + struct vm_area_struct *vma; unsigned long range_start; unsigned long range_end; + unsigned int nr; + struct page *pages[FREE_PTE_NR]; }; DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +/* + * This is unnecessarily complex. There's three ways the TLB shootdown + * code is used: + * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). + * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. + * 2. Unmapping all vmas. See exit_mmap(). + * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. Additionally, page tables will be freed. + * 3. Unmapping argument pages. See shift_arg_pages(). + * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. + * tlb->vma will be NULL. 
+ */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || !tlb->vma) + flush_tlb_mm(tlb->mm); + else if (tlb->range_end > 0) { + flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); + tlb->range_start = TASK_SIZE; + tlb->range_end = 0; + } +} + +static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +{ + if (!tlb->fullmm) { + if (addr < tlb->range_start) + tlb->range_start = addr; + if (addr + PAGE_SIZE > tlb->range_end) + tlb->range_end = addr + PAGE_SIZE; + } +} + +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush(tlb); + if (!tlb_fast_mode(tlb)) { + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + } +} + static inline struct mmu_gather * tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) { @@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) tlb->mm = mm; tlb->fullmm = full_mm_flush; + tlb->vma = NULL; + tlb->nr = 0; return tlb; } @@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - if (tlb->fullmm) - flush_tlb_mm(tlb->mm); + tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); @@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) static inline void tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) { - if (!tlb->fullmm) { - if (addr < tlb->range_start) - tlb->range_start = addr; - if (addr + PAGE_SIZE > tlb->range_end) - tlb->range_end = addr + PAGE_SIZE; - } + tlb_add_flush(tlb, addr); } /* @@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->fullmm) { flush_cache_range(vma, vma->vm_start, vma->vm_end); + tlb->vma = vma; tlb->range_start = TASK_SIZE; tlb->range_end = 0; } @@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { - if (!tlb->fullmm && tlb->range_end > 0) - flush_tlb_range(vma, tlb->range_start, tlb->range_end); + if (!tlb->fullmm) + tlb_flush(tlb); +} + +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + } else { + tlb->pages[tlb->nr++] = page; + if (tlb->nr >= FREE_PTE_NR) + tlb_flush_mmu(tlb); + } +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) +{ + pgtable_page_dtor(pte); + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, pte); } -#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a2..d2005de383b8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,12 +10,7 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H - -#ifndef CONFIG_MMU - -#define tlb_flush(tlb) ((void) tlb) - -#else /* CONFIG_MMU */ +#ifdef CONFIG_MMU #include <asm/glue.h> diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index d600bd350704..44b84fe6e1b0 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ 
b/arch/arm/kernel/hw_breakpoint.c @@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, /* * One-time initialisation. */ -static void reset_ctrl_regs(void *unused) +static void reset_ctrl_regs(void *info) { - int i; + int i, cpu = smp_processor_id(); + u32 dbg_power; + cpumask_t *cpumask = info; /* * v7 debug contains save and restore registers so that debug state @@ -850,6 +852,17 @@ static void reset_ctrl_regs(void *unused) */ if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { /* + * Ensure sticky power-down is clear (i.e. debug logic is + * powered up). + */ + asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); + if ((dbg_power & 0x1) == 0) { + pr_warning("CPU %d debug is powered down!\n", cpu); + cpumask_or(cpumask, cpumask, cpumask_of(cpu)); + return; + } + + /* * Unconditionally clear the lock by writing a value * other than 0xC5ACCE55 to the access register. */ @@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { static int __init arch_hw_breakpoint_init(void) { u32 dscr; + cpumask_t cpumask = { CPU_BITS_NONE }; debug_arch = get_debug_arch(); @@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void) * Reset the breakpoint resources. We assume that a halting * debugger will leave the world in a nice state for us. */ - on_each_cpu(reset_ctrl_regs, NULL, 1); + on_each_cpu(reset_ctrl_regs, &cpumask, 1); + if (!cpumask_empty(&cpumask)) { + core_num_brps = 0; + core_num_reserved_brps = 0; + core_num_wrps = 0; + return 0; + } ARM_DBG_READ(c1, 0, dscr); if (dscr & ARM_DSCR_HDBGEN) { diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index 2c1f0050c9c4..8f6ed43861f1 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) return space_cccc_1100_010x(insn, asi); - } else if ((insn & 0x0e000000) == 0x0c400000) { + } else if ((insn & 0x0e000000) == 0x0c000000) { return space_cccc_110x(insn, asi); diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index b8af96ea62e6..2c79eec19262 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -97,28 +97,34 @@ set_irq_affinity(int irq, irq, cpu); return err; #else - return 0; + return -EINVAL; #endif } static int init_cpu_pmu(void) { - int i, err = 0; + int i, irqs, err = 0; struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; - if (!pdev) { - err = -ENODEV; - goto out; - } + if (!pdev) + return -ENODEV; + + irqs = pdev->num_resources; + + /* + * If we have a single PMU interrupt that we can't shift, assume that + * we're running on a uniprocessor machine and continue. 
+ */ + if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0))) + return 0; - for (i = 0; i < pdev->num_resources; ++i) { + for (i = 0; i < irqs; ++i) { err = set_irq_affinity(platform_get_irq(pdev, i), i); if (err) break; } -out: return err; } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 19c6816db61e..b13e70f63d71 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num, while (!(arch_ctrl.len & 0x1)) arch_ctrl.len >>= 1; - if (idx & 0x1) - reg = encode_ctrl_reg(arch_ctrl); - else + if (num & 0x1) reg = bp->attr.bp_addr; + else + reg = encode_ctrl_reg(arch_ctrl); } put: diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 420b8d6485d6..5ea4fb718b97 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -226,8 +226,8 @@ int cpu_architecture(void) * Register 0 and check for VMSAv7 or PMSAv7 */ asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0)); - if ((mmfr0 & 0x0000000f) == 0x00000003 || - (mmfr0 & 0x000000f0) == 0x00000030) + if ((mmfr0 & 0x0000000f) >= 0x00000003 || + (mmfr0 & 0x000000f0) >= 0x00000030) cpu_arch = CPU_ARCH_ARMv7; else if ((mmfr0 & 0x0000000f) == 0x00000002 || (mmfr0 & 0x000000f0) == 0x00000020) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bca..abaf8445ce25 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; - unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; + unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); + + cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f2031..61462790757f 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -21,6 +21,12 @@ #define ARM_CPU_KEEP(x) #endif +#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) +#define ARM_EXIT_KEEP(x) x +#else +#define ARM_EXIT_KEEP(x) +#endif + OUTPUT_ARCH(arm) ENTRY(stext) @@ -43,6 +49,7 @@ SECTIONS _sinittext = .; HEAD_TEXT INIT_TEXT + ARM_EXIT_KEEP(EXIT_TEXT) _einittext = .; ARM_CPU_DISCARD(PROC_INFO) __arch_info_begin = .; @@ -67,6 +74,7 @@ SECTIONS #ifndef CONFIG_XIP_KERNEL __init_begin = _stext; INIT_DATA + ARM_EXIT_KEEP(EXIT_DATA) #endif } @@ -162,6 +170,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_DATA + ARM_EXIT_KEEP(EXIT_DATA) . 
= ALIGN(PAGE_SIZE); __init_end = .; #endif @@ -247,6 +256,8 @@ SECTIONS } #endif + NOTES + BSS_SECTION(0, 0, 0) _end = .; diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 343de73161fa..4a68c2b1ec11 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c @@ -132,7 +132,7 @@ out: return ret; } -static int __init davinci_cpu_init(struct cpufreq_policy *policy) +static int davinci_cpu_init(struct cpufreq_policy *policy) { int result = 0; struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 9eec63070e0c..beda8a4133a0 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = { .resource = da850_mcasp_resources, }; +struct platform_device davinci_pcm_device = { + .name = "davinci-pcm-audio", + .id = -1, +}; + void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) { + platform_device_register(&davinci_pcm_device); + /* DA830/OMAP-L137 has 3 instances of McASP */ if (cpu_is_davinci_da830() && id == 1) { da830_mcasp1_device.dev.platform_data = pdata; diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c index d10298620e2c..3fa3e2867e19 100644 --- a/arch/arm/mach-davinci/gpio-tnetv107x.c +++ b/arch/arm/mach-davinci/gpio-tnetv107x.c @@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset) spin_lock_irqsave(&ctlr->lock, flags); - gpio_reg_set_bit(&regs->enable, gpio); + gpio_reg_set_bit(regs->enable, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset) spin_lock_irqsave(&ctlr->lock, flags); - gpio_reg_clear_bit(&regs->enable, gpio); + gpio_reg_clear_bit(regs->enable, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); } @@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset) spin_lock_irqsave(&ctlr->lock, flags); - gpio_reg_set_bit(&regs->direction, gpio); + gpio_reg_set_bit(regs->direction, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip, spin_lock_irqsave(&ctlr->lock, flags); if (value) - gpio_reg_set_bit(&regs->data_out, gpio); + gpio_reg_set_bit(regs->data_out, gpio); else - gpio_reg_clear_bit(&regs->data_out, gpio); + gpio_reg_clear_bit(regs->data_out, gpio); - gpio_reg_clear_bit(&regs->direction, gpio); + gpio_reg_clear_bit(regs->direction, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset) unsigned gpio = chip->base + offset; int ret; - ret = gpio_reg_get_bit(&regs->data_in, gpio); + ret = gpio_reg_get_bit(regs->data_in, gpio); return ret ?
1 : 0; } @@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip, spin_lock_irqsave(&ctlr->lock, flags); if (value) - gpio_reg_set_bit(&regs->data_out, gpio); + gpio_reg_set_bit(regs->data_out, gpio); else - gpio_reg_clear_bit(&regs->data_out, gpio); + gpio_reg_clear_bit(regs->data_out, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); } diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h index 730c49d1ebd8..14a504887189 100644 --- a/arch/arm/mach-davinci/include/mach/clkdev.h +++ b/arch/arm/mach-davinci/include/mach/clkdev.h @@ -1,6 +1,8 @@ #ifndef __MACH_CLKDEV_H #define __MACH_CLKDEV_H +struct clk; + static inline int __clk_get(struct clk *clk) { return 1; diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c index 337392c3f549..acb7ae5b0a25 100644 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ b/arch/arm/mach-omap2/clkt_dpll.c @@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n) dd = clk->dpll_data; /* DPLL divider must result in a valid jitter correction val */ - fint = clk->parent->rate / (n + 1); + fint = clk->parent->rate / n; if (fint < DPLL_FINT_BAND1_MIN) { pr_debug("rejecting n=%d due to Fint failure, " diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 394413dc7deb..24b88504df0f 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c @@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_type_t irq) { struct omap_mbox2_priv *p = mbox->priv; - u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; - l = mbox_read_reg(p->irqdisable); - l &= ~bit; - mbox_write_reg(l, p->irqdisable); + u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; + + if (!cpu_is_omap44xx()) + bit = mbox_read_reg(p->irqdisable) & ~bit; + + mbox_write_reg(bit, p->irqdisable); } static void omap2_mbox_ack_irq(struct omap_mbox *mbox, @@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = { .priv = &omap2_mbox_iva_priv, }; -struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL }; +struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL }; #endif #if defined(CONFIG_ARCH_OMAP4) diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 98148b6c36e9..6c84659cf846 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry( list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; - (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir, + (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, m, &omap_mux_dbg_signal_fops); } } diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 125f56591fb5..a5a83b358ddd 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -637,14 +637,14 @@ static int __init pm_dbg_init(void) } - (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d, &enable_off_mode, &pm_dbg_option_fops); - (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d, &sleep_while_idle, &pm_dbg_option_fops); - (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d, &wakeup_timer_seconds, &pm_dbg_option_fops); (void)
debugfs_create_file("wakeup_timer_milliseconds", - S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds, + S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds, &pm_dbg_option_fops); pm_dbg_init_done = 1; diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h index 729a644ce852..3300ff6e3cfe 100644 --- a/arch/arm/mach-omap2/prcm_mpu44xx.h +++ b/arch/arm/mach-omap2/prcm_mpu44xx.h @@ -38,8 +38,8 @@ #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 /* PRCM_MPU clockdomain register offsets (from instance start) */ -#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000 -#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000 +#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018 +#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018 /* diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index c37e823266d3..1a777e34d0c2 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -282,6 +282,7 @@ error: dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" "interrupt handler. Smartreflex will" "not function as desired\n", __func__); + kfree(name); kfree(sr_info); return ret; } @@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) ret = sr_late_init(sr_info); if (ret) { pr_warning("%s: Error in SR late init\n", __func__); - return ret; + goto err_release_region; } } @@ -890,17 +891,20 @@ static int __init omap_sr_probe(struct platform_device *pdev) * not try to create rest of the debugfs entries. */ vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); - if (!vdd_dbg_dir) - return -EINVAL; + if (!vdd_dbg_dir) { + ret = -EINVAL; + goto err_release_region; + } dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); if (IS_ERR(dbg_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", __func__); - return PTR_ERR(dbg_dir); + ret = PTR_ERR(dbg_dir); + goto err_release_region; } - (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir, + (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, (void *)sr_info, &pm_sr_fops); (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, &sr_info->err_weight); @@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (IS_ERR(nvalue_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory" "for n-values\n", __func__); - return PTR_ERR(nvalue_dir); + ret = PTR_ERR(nvalue_dir); + goto err_release_region; } omap_voltage_get_volttable(sr_info->voltdm, &volt_data); @@ -922,24 +927,16 @@ static int __init omap_sr_probe(struct platform_device *pdev) " corresponding vdd vdd_%s. 
Cannot create debugfs" "entries for n-values\n", __func__, sr_info->voltdm->name); - return -ENODATA; + ret = -ENODATA; + goto err_release_region; } for (i = 0; i < sr_info->nvalue_count; i++) { - char *name; - char volt_name[32]; - - name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL); - if (!name) { - dev_err(&pdev->dev, "%s: Unable to allocate memory" - " for n-value directory name\n", __func__); - return -ENOMEM; - } + char name[NVALUE_NAME_LEN + 1]; - strcpy(name, "volt_"); - sprintf(volt_name, "%d", volt_data[i].volt_nominal); - strcat(name, volt_name); - (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir, + snprintf(name, sizeof(name), "volt_%d", + volt_data[i].volt_nominal); + (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].nvalue)); } diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 7b7c2683ae7b..0fc550e7e482 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -39,6 +39,7 @@ #include <asm/mach/time.h> #include <plat/dmtimer.h> #include <asm/localtimer.h> +#include <asm/sched_clock.h> #include "timer-gp.h" @@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void) /* * clocksource */ +static DEFINE_CLOCK_DATA(cd); static struct omap_dm_timer *gpt_clocksource; static cycle_t clocksource_read_cycles(struct clocksource *cs) { @@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static void notrace dmtimer_update_sched_clock(void) +{ + u32 cyc; + + cyc = omap_dm_timer_read_counter(gpt_clocksource); + + update_sched_clock(&cd, cyc, (u32)~0); +} + /* Setup free-running counter for clocksource */ static void __init omap2_gp_clocksource_init(void) { @@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void) omap_dm_timer_set_load_start(gpt, 1, 0); + init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate); + if (clocksource_register_hz(&clocksource_gpt, tick_rate)) printk(err2, clocksource_gpt.name); } diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fbc5b775f895..b166b1d845d7 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = { &pxa25x_device_assp, &pxa25x_device_pwm0, &pxa25x_device_pwm1, + &pxa_device_asoc_platform, }; static struct sys_device pxa25x_sysdev[] = { diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index c31e601eb49c..b9b1e5c2b290 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev) goto err_rfk_alloc; } - rfkill_set_led_trigger_name(rfk, "tosa-bt"); - rc = rfkill_register(rfk); if (rc) goto err_rfkill; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index af152e70cfcf..f2582ec300d9 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = { .dev.platform_data = &sharpsl_rom_data, }; +static struct platform_device wm9712_device = { + .name = "wm9712-codec", + .id = -1, +}; + static struct platform_device *devices[] __initdata = { &tosascoop_device, &tosascoop_jc_device, @@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = { &tosaled_device, &tosa_bt_device, &sharpsl_rom_device, + &wm9712_device, }; static void tosa_poweroff(void) diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig index 
a0cb2581894f..50825a3f91cc 100644 --- a/arch/arm/mach-s3c2440/Kconfig +++ b/arch/arm/mach-s3c2440/Kconfig @@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02 select POWER_SUPPLY select MACH_NEO1973 select S3C2410_PWM + select S3C_DEV_USB_HOST help Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h index 953331d8d56a..3a56a229cac6 100644 --- a/arch/arm/mach-s3c2440/include/mach/gta02.h +++ b/arch/arm/mach-s3c2440/include/mach/gta02.h @@ -44,19 +44,19 @@ #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ -#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */ -#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2 -#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */ -#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */ -#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4 -#define GTA02_GPIO_3D_RESET S3C2440_GPJ5 -#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */ -#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7 -#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8 -#define GTA02_GPIO_KEEPACT S3C2440_GPJ8 -#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10 -#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */ -#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */ +#define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */ +#define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2) +#define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */ +#define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */ +#define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4) +#define GTA02_GPIO_3D_RESET S3C2410_GPJ(5) +#define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */ +#define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7) +#define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8) +#define GTA02_GPIO_KEEPACT S3C2410_GPJ(8) +#define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10) +#define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */ +#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */ #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 #define GTA02_IRQ_MODEM IRQ_EINT1 diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c index dd3782064508..fdfc4d5e37a1 100644 --- a/arch/arm/mach-s3c64xx/clock.c +++ b/arch/arm/mach-s3c64xx/clock.c @@ -151,6 +151,12 @@ static struct clk init_clocks_off[] = { .enable = s3c64xx_pclk_ctrl, .ctrlbit = S3C_CLKCON_PCLK_IIC, }, { + .name = "i2c", + .id = 1, + .parent = &clk_p, + .enable = s3c64xx_pclk_ctrl, + .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, + }, { .name = "iis", .id = 0, .parent = &clk_p, diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index 135db1b41252..c35585cf8c4f 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c @@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase, regptr = regs + PL080_Cx_BASE(0); - for (ch = 0; ch < 8; ch++, chno++, chptr++) { - printk(KERN_INFO "%s: registering DMA %d (%p)\n", - __func__, chno, regptr); + for (ch = 0; ch < 8; ch++, chptr++) { + pr_debug("%s: registering DMA %d (%p)\n", + __func__, chno + ch, regptr); chptr->bit = 1 << ch; - chptr->number = chno; + chptr->number = chno + ch; chptr->dmac = dmac; chptr->regs = regptr; regptr += PL080_Cx_STRIDE; @@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase, /* for the moment, permanently enable the controller */ writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); - printk(KERN_INFO "PL080: IRQ %d, at %p\n", 
irq, regs); + printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n", + irq, regs, chno, chno+8); return 0; diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c index fd99a82e82c4..92b09085caaa 100644 --- a/arch/arm/mach-s3c64xx/gpiolib.c +++ b/arch/arm/mach-s3c64xx/gpiolib.c @@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = { .get_pull = s3c_gpio_getpull_updown, }; -int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) +static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) { return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; } @@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = { }, }; -int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) +static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) { return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; } diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index e85192a86fbe..a80a3163dd30 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -28,6 +28,7 @@ #include <linux/delay.h> #include <linux/smsc911x.h> #include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #ifdef CONFIG_SMDK6410_WM1190_EV1 #include <linux/mfd/wm8350/core.h> @@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = { /* VDD_UH_MMC, LDO5 on J5 */ static struct regulator_init_data smdk6410_vdduh_mmc = { .constraints = { - .name = "PVDD_UH/PVDD_MMC", + .name = "PVDD_UH+PVDD_MMC", .always_on = 1, }, }; @@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = { /* S3C64xx internal logic & PLL */ static struct regulator_init_data wm8350_dcdc1_data = { .constraints = { - .name = "PVDD_INT/PVDD_PLL", + .name = "PVDD_INT+PVDD_PLL", .min_uV = 1200000, .max_uV = 1200000, .always_on = 1, @@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = { static struct regulator_init_data wm8350_dcdc4_data = { .constraints = { - .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV", + .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV", .min_uV = 3000000, .max_uV = 3000000, .always_on = 1, @@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = { /* OTGi/1190-EV1 HPVDD & AVDD */ static struct regulator_init_data wm8350_ldo4_data = { .constraints = { - .name = "PVDD_OTGI/HPVDD/AVDD", + .name = "PVDD_OTGI+HPVDD+AVDD", .min_uV = 1200000, .max_uV = 1200000, .apply_uV = 1, @@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = { static struct regulator_init_data wm1192_dcdc3 = { .constraints = { - .name = "PVDD_MEM/PVDD_GPS", + .name = "PVDD_MEM+PVDD_GPS", .always_on = 1, }, }; @@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = { static struct regulator_init_data wm1192_ldo1 = { .constraints = { - .name = "PVDD_LCD/PVDD_EXT", + .name = "PVDD_LCD+PVDD_EXT", .always_on = 1, }, .consumer_supplies = wm1192_ldo1_consumers, diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c index f8ed0d22db70..1d4d0ee9e870 100644 --- a/arch/arm/mach-s3c64xx/setup-keypad.c +++ b/arch/arm/mach-s3c64xx/setup-keypad.c @@ -17,7 +17,7 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) { /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ - s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3)); + s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3)); /* Set all the necessary GPL pins to special-function 3: 
KP_COL[x] */ s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3)); diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c index 1a942037c4ef..f344a222bc84 100644 --- a/arch/arm/mach-s3c64xx/setup-sdhci.c +++ b/arch/arm/mach-s3c64xx/setup-sdhci.c @@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev, else ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); - printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); + pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); writel(ctrl2, r + S3C_SDHCI_CONTROL2); writel(ctrl3, r + S3C_SDHCI_CONTROL3); } diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h index 203dd5a18bd5..058dab4482a1 100644 --- a/arch/arm/mach-s5p6442/include/mach/map.h +++ b/arch/arm/mach-s5p6442/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5p6442/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5P6442 - Memory map definitions @@ -16,56 +16,61 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5P6442_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5P6442_PA_CHIPID +#define S5P6442_PA_SDRAM 0x20000000 -#define S5P6442_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5P6442_PA_SYSCON +#define S5P6442_PA_I2S0 0xC0B00000 +#define S5P6442_PA_I2S1 0xF2200000 -#define S5P6442_PA_GPIO (0xE0200000) +#define S5P6442_PA_CHIPID 0xE0000000 -#define S5P6442_PA_VIC0 (0xE4000000) -#define S5P6442_PA_VIC1 (0xE4100000) -#define S5P6442_PA_VIC2 (0xE4200000) +#define S5P6442_PA_SYSCON 0xE0100000 -#define S5P6442_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5P6442_PA_SROMC +#define S5P6442_PA_GPIO 0xE0200000 -#define S5P6442_PA_MDMA 0xE8000000 -#define S5P6442_PA_PDMA 0xE9000000 +#define S5P6442_PA_VIC0 0xE4000000 +#define S5P6442_PA_VIC1 0xE4100000 +#define S5P6442_PA_VIC2 0xE4200000 -#define S5P6442_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5P6442_PA_TIMER +#define S5P6442_PA_SROMC 0xE7000000 -#define S5P6442_PA_SYSTIMER (0xEA100000) +#define S5P6442_PA_MDMA 0xE8000000 +#define S5P6442_PA_PDMA 0xE9000000 -#define S5P6442_PA_WATCHDOG (0xEA200000) +#define S5P6442_PA_TIMER 0xEA000000 -#define S5P6442_PA_UART (0xEC000000) +#define S5P6442_PA_SYSTIMER 0xEA100000 -#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0) -#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400) -#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800) -#define S5P_SZ_UART SZ_256 +#define S5P6442_PA_WATCHDOG 0xEA200000 -#define S5P6442_PA_IIC0 (0xEC100000) +#define S5P6442_PA_UART 0xEC000000 -#define S5P6442_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5P6442_PA_SDRAM +#define S5P6442_PA_IIC0 0xEC100000 #define S5P6442_PA_SPI 0xEC300000 -/* I2S */ -#define S5P6442_PA_I2S0 0xC0B00000 -#define S5P6442_PA_I2S1 0xF2200000 - -/* PCM */ #define S5P6442_PA_PCM0 0xF2400000 #define S5P6442_PA_PCM1 0xF2500000 -/* compatibiltiy defines. 
*/ +/* Compatibiltiy Defines */ + +#define S3C_PA_IIC S5P6442_PA_IIC0 #define S3C_PA_WDT S5P6442_PA_WATCHDOG + +#define S5P_PA_CHIPID S5P6442_PA_CHIPID +#define S5P_PA_SDRAM S5P6442_PA_SDRAM +#define S5P_PA_SROMC S5P6442_PA_SROMC +#define S5P_PA_SYSCON S5P6442_PA_SYSCON +#define S5P_PA_TIMER S5P6442_PA_TIMER + +/* UART */ + #define S3C_PA_UART S5P6442_PA_UART -#define S3C_PA_IIC S5P6442_PA_IIC0 + +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h index 5486c8f01f1d..adb5f298ead8 100644 --- a/arch/arm/mach-s5p64x0/include/mach/gpio.h +++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h @@ -23,7 +23,7 @@ #define S5P6440_GPIO_A_NR (6) #define S5P6440_GPIO_B_NR (7) #define S5P6440_GPIO_C_NR (8) -#define S5P6440_GPIO_F_NR (2) +#define S5P6440_GPIO_F_NR (16) #define S5P6440_GPIO_G_NR (7) #define S5P6440_GPIO_H_NR (10) #define S5P6440_GPIO_I_NR (16) @@ -36,7 +36,7 @@ #define S5P6450_GPIO_B_NR (7) #define S5P6450_GPIO_C_NR (8) #define S5P6450_GPIO_D_NR (8) -#define S5P6450_GPIO_F_NR (2) +#define S5P6450_GPIO_F_NR (16) #define S5P6450_GPIO_G_NR (14) #define S5P6450_GPIO_H_NR (10) #define S5P6450_GPIO_I_NR (16) diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h index a9365e5ba614..95c91257c7ca 100644 --- a/arch/arm/mach-s5p64x0/include/mach/map.h +++ b/arch/arm/mach-s5p64x0/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5p64x0/include/mach/map.h * - * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5P64X0 - Memory map definitions @@ -16,64 +16,46 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5P64X0_PA_SDRAM (0x20000000) +#define S5P64X0_PA_SDRAM 0x20000000 -#define S5P64X0_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5P64X0_PA_CHIPID - -#define S5P64X0_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5P64X0_PA_SYSCON - -#define S5P64X0_PA_GPIO (0xE0308000) - -#define S5P64X0_PA_VIC0 (0xE4000000) -#define S5P64X0_PA_VIC1 (0xE4100000) +#define S5P64X0_PA_CHIPID 0xE0000000 -#define S5P64X0_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5P64X0_PA_SROMC - -#define S5P64X0_PA_PDMA (0xE9000000) - -#define S5P64X0_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5P64X0_PA_TIMER +#define S5P64X0_PA_SYSCON 0xE0100000 -#define S5P64X0_PA_RTC (0xEA100000) +#define S5P64X0_PA_GPIO 0xE0308000 -#define S5P64X0_PA_WDT (0xEA200000) +#define S5P64X0_PA_VIC0 0xE4000000 +#define S5P64X0_PA_VIC1 0xE4100000 -#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) -#define S5P6450_PA_UART(x) ((x < 5) ? 
(0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) +#define S5P64X0_PA_SROMC 0xE7000000 -#define S5P_PA_UART0 S5P6450_PA_UART(0) -#define S5P_PA_UART1 S5P6450_PA_UART(1) -#define S5P_PA_UART2 S5P6450_PA_UART(2) -#define S5P_PA_UART3 S5P6450_PA_UART(3) -#define S5P_PA_UART4 S5P6450_PA_UART(4) -#define S5P_PA_UART5 S5P6450_PA_UART(5) +#define S5P64X0_PA_PDMA 0xE9000000 -#define S5P_SZ_UART SZ_256 +#define S5P64X0_PA_TIMER 0xEA000000 +#define S5P64X0_PA_RTC 0xEA100000 +#define S5P64X0_PA_WDT 0xEA200000 -#define S5P6440_PA_IIC0 (0xEC104000) -#define S5P6440_PA_IIC1 (0xEC20F000) -#define S5P6450_PA_IIC0 (0xEC100000) -#define S5P6450_PA_IIC1 (0xEC200000) +#define S5P6440_PA_IIC0 0xEC104000 +#define S5P6440_PA_IIC1 0xEC20F000 +#define S5P6450_PA_IIC0 0xEC100000 +#define S5P6450_PA_IIC1 0xEC200000 -#define S5P64X0_PA_SPI0 (0xEC400000) -#define S5P64X0_PA_SPI1 (0xEC500000) +#define S5P64X0_PA_SPI0 0xEC400000 +#define S5P64X0_PA_SPI1 0xEC500000 -#define S5P64X0_PA_HSOTG (0xED100000) +#define S5P64X0_PA_HSOTG 0xED100000 #define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) -#define S5P64X0_PA_I2S (0xF2000000) +#define S5P64X0_PA_I2S 0xF2000000 #define S5P6450_PA_I2S1 0xF2800000 #define S5P6450_PA_I2S2 0xF2900000 -#define S5P64X0_PA_PCM (0xF2100000) +#define S5P64X0_PA_PCM 0xF2100000 -#define S5P64X0_PA_ADC (0xF3000000) +#define S5P64X0_PA_ADC 0xF3000000 -/* compatibiltiy defines. */ +/* Compatibiltiy Defines */ #define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) #define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) @@ -83,6 +65,25 @@ #define S3C_PA_RTC S5P64X0_PA_RTC #define S3C_PA_WDT S5P64X0_PA_WDT +#define S5P_PA_CHIPID S5P64X0_PA_CHIPID +#define S5P_PA_SROMC S5P64X0_PA_SROMC +#define S5P_PA_SYSCON S5P64X0_PA_SYSCON +#define S5P_PA_TIMER S5P64X0_PA_TIMER + #define SAMSUNG_PA_ADC S5P64X0_PA_ADC +/* UART */ + +#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) +#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) + +#define S5P_PA_UART0 S5P6450_PA_UART(0) +#define S5P_PA_UART1 S5P6450_PA_UART(1) +#define S5P_PA_UART2 S5P6450_PA_UART(2) +#define S5P_PA_UART3 S5P6450_PA_UART(3) +#define S5P_PA_UART4 S5P6450_PA_UART(4) +#define S5P_PA_UART5 S5P6450_PA_UART(5) + +#define S5P_SZ_UART SZ_256 + #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h index 328467b346aa..ccbe6b767f7d 100644 --- a/arch/arm/mach-s5pc100/include/mach/map.h +++ b/arch/arm/mach-s5pc100/include/mach/map.h @@ -1,5 +1,8 @@ /* linux/arch/arm/mach-s5pc100/include/mach/map.h * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * * Copyright 2009 Samsung Electronics Co. 
* Byungho Min <bhmin@samsung.com> * @@ -16,145 +19,115 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -/* - * map-base.h has already defined virtual memory address - * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s) - * S3C_VA_SYS S3C_ADDR(0x00100000) system control - * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used) - * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block - * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog - * S3C_VA_UART S3C_ADDR(0x01000000) UART - * - * S5PC100 specific virtual memory address can be defined here - * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO - * - */ +#define S5PC100_PA_SDRAM 0x20000000 + +#define S5PC100_PA_ONENAND 0xE7100000 +#define S5PC100_PA_ONENAND_BUF 0xB0000000 + +#define S5PC100_PA_CHIPID 0xE0000000 -#define S5PC100_PA_ONENAND_BUF (0xB0000000) -#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M) +#define S5PC100_PA_SYSCON 0xE0100000 -/* Chip ID */ +#define S5PC100_PA_OTHERS 0xE0200000 -#define S5PC100_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5PC100_PA_CHIPID +#define S5PC100_PA_GPIO 0xE0300000 -#define S5PC100_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5PC100_PA_SYSCON +#define S5PC100_PA_VIC0 0xE4000000 +#define S5PC100_PA_VIC1 0xE4100000 +#define S5PC100_PA_VIC2 0xE4200000 -#define S5PC100_PA_OTHERS (0xE0200000) -#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) +#define S5PC100_PA_SROMC 0xE7000000 -#define S5PC100_PA_GPIO (0xE0300000) -#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) +#define S5PC100_PA_CFCON 0xE7800000 -/* Interrupt */ -#define S5PC100_PA_VIC0 (0xE4000000) -#define S5PC100_PA_VIC1 (0xE4100000) -#define S5PC100_PA_VIC2 (0xE4200000) -#define S5PC100_VA_VIC S3C_VA_IRQ -#define S5PC100_VA_VIC_OFFSET 0x10000 -#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET)) +#define S5PC100_PA_MDMA 0xE8100000 +#define S5PC100_PA_PDMA0 0xE9000000 +#define S5PC100_PA_PDMA1 0xE9200000 -#define S5PC100_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5PC100_PA_SROMC +#define S5PC100_PA_TIMER 0xEA000000 +#define S5PC100_PA_SYSTIMER 0xEA100000 +#define S5PC100_PA_WATCHDOG 0xEA200000 +#define S5PC100_PA_RTC 0xEA300000 -#define S5PC100_PA_ONENAND (0xE7100000) +#define S5PC100_PA_UART 0xEC000000 -#define S5PC100_PA_CFCON (0xE7800000) +#define S5PC100_PA_IIC0 0xEC100000 +#define S5PC100_PA_IIC1 0xEC200000 -/* DMA */ -#define S5PC100_PA_MDMA (0xE8100000) -#define S5PC100_PA_PDMA0 (0xE9000000) -#define S5PC100_PA_PDMA1 (0xE9200000) +#define S5PC100_PA_SPI0 0xEC300000 +#define S5PC100_PA_SPI1 0xEC400000 +#define S5PC100_PA_SPI2 0xEC500000 -/* Timer */ -#define S5PC100_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5PC100_PA_TIMER +#define S5PC100_PA_USB_HSOTG 0xED200000 +#define S5PC100_PA_USB_HSPHY 0xED300000 -#define S5PC100_PA_SYSTIMER (0xEA100000) +#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) -#define S5PC100_PA_WATCHDOG (0xEA200000) -#define S5PC100_PA_RTC (0xEA300000) +#define S5PC100_PA_FB 0xEE000000 -#define S5PC100_PA_UART (0xEC000000) +#define S5PC100_PA_FIMC0 0xEE200000 +#define S5PC100_PA_FIMC1 0xEE300000 +#define S5PC100_PA_FIMC2 0xEE400000 -#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0) -#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400) -#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800) -#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00) -#define S5P_SZ_UART SZ_256 +#define S5PC100_PA_I2S0 0xF2000000 +#define S5PC100_PA_I2S1 0xF2100000 +#define S5PC100_PA_I2S2 0xF2200000 -#define S5PC100_PA_IIC0 (0xEC100000) -#define S5PC100_PA_IIC1 (0xEC200000) +#define S5PC100_PA_AC97 0xF2300000 -/* SPI */ -#define 
S5PC100_PA_SPI0 0xEC300000 -#define S5PC100_PA_SPI1 0xEC400000 -#define S5PC100_PA_SPI2 0xEC500000 +#define S5PC100_PA_PCM0 0xF2400000 +#define S5PC100_PA_PCM1 0xF2500000 -/* USB HS OTG */ -#define S5PC100_PA_USB_HSOTG (0xED200000) -#define S5PC100_PA_USB_HSPHY (0xED300000) +#define S5PC100_PA_SPDIF 0xF2600000 -#define S5PC100_PA_FB (0xEE000000) +#define S5PC100_PA_TSADC 0xF3000000 -#define S5PC100_PA_FIMC0 (0xEE200000) -#define S5PC100_PA_FIMC1 (0xEE300000) -#define S5PC100_PA_FIMC2 (0xEE400000) +#define S5PC100_PA_KEYPAD 0xF3100000 -#define S5PC100_PA_I2S0 (0xF2000000) -#define S5PC100_PA_I2S1 (0xF2100000) -#define S5PC100_PA_I2S2 (0xF2200000) +/* Compatibiltiy Defines */ -#define S5PC100_PA_AC97 0xF2300000 +#define S3C_PA_FB S5PC100_PA_FB +#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) +#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) +#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) +#define S3C_PA_IIC S5PC100_PA_IIC0 +#define S3C_PA_IIC1 S5PC100_PA_IIC1 +#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD +#define S3C_PA_ONENAND S5PC100_PA_ONENAND +#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF +#define S3C_PA_RTC S5PC100_PA_RTC +#define S3C_PA_TSADC S5PC100_PA_TSADC +#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG +#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY +#define S3C_PA_WDT S5PC100_PA_WATCHDOG -/* PCM */ -#define S5PC100_PA_PCM0 0xF2400000 -#define S5PC100_PA_PCM1 0xF2500000 +#define S5P_PA_CHIPID S5PC100_PA_CHIPID +#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 +#define S5P_PA_FIMC1 S5PC100_PA_FIMC1 +#define S5P_PA_FIMC2 S5PC100_PA_FIMC2 +#define S5P_PA_SDRAM S5PC100_PA_SDRAM +#define S5P_PA_SROMC S5PC100_PA_SROMC +#define S5P_PA_SYSCON S5PC100_PA_SYSCON +#define S5P_PA_TIMER S5PC100_PA_TIMER -#define S5PC100_PA_SPDIF 0xF2600000 +#define SAMSUNG_PA_ADC S5PC100_PA_TSADC +#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON +#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD -#define S5PC100_PA_TSADC (0xF3000000) +#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) -/* KEYPAD */ -#define S5PC100_PA_KEYPAD (0xF3100000) +#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M) -#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) +/* UART */ -#define S5PC100_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5PC100_PA_SDRAM +#define S3C_PA_UART S5PC100_PA_UART -/* compatibiltiy defines. 
*/ -#define S3C_PA_UART S5PC100_PA_UART -#define S3C_PA_IIC S5PC100_PA_IIC0 -#define S3C_PA_IIC1 S5PC100_PA_IIC1 -#define S3C_PA_FB S5PC100_PA_FB -#define S3C_PA_G2D S5PC100_PA_G2D -#define S3C_PA_G3D S5PC100_PA_G3D -#define S3C_PA_JPEG S5PC100_PA_JPEG -#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR -#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0) -#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1) -#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2) -#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG -#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY -#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) -#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) -#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) -#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD -#define S3C_PA_WDT S5PC100_PA_WATCHDOG -#define S3C_PA_TSADC S5PC100_PA_TSADC -#define S3C_PA_ONENAND S5PC100_PA_ONENAND -#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF -#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF -#define S3C_PA_RTC S5PC100_PA_RTC - -#define SAMSUNG_PA_ADC S5PC100_PA_TSADC -#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON -#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) -#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 -#define S5P_PA_FIMC1 S5PC100_PA_FIMC1 -#define S5P_PA_FIMC2 S5PC100_PA_FIMC2 +#define S5P_SZ_UART SZ_256 -#endif /* __ASM_ARCH_C100_MAP_H */ +#endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h index 3611492ad681..1dd58836fd4f 100644 --- a/arch/arm/mach-s5pv210/include/mach/map.h +++ b/arch/arm/mach-s5pv210/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5pv210/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com/ * * S5PV210 - Memory map definitions @@ -16,122 +16,120 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5PV210_PA_SROM_BANK5 (0xA8000000) +#define S5PV210_PA_SDRAM 0x20000000 -#define S5PC110_PA_ONENAND (0xB0000000) -#define S5P_PA_ONENAND S5PC110_PA_ONENAND +#define S5PV210_PA_SROM_BANK5 0xA8000000 -#define S5PC110_PA_ONENAND_DMA (0xB0600000) -#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA +#define S5PC110_PA_ONENAND 0xB0000000 +#define S5PC110_PA_ONENAND_DMA 0xB0600000 -#define S5PV210_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5PV210_PA_CHIPID +#define S5PV210_PA_CHIPID 0xE0000000 -#define S5PV210_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5PV210_PA_SYSCON +#define S5PV210_PA_SYSCON 0xE0100000 -#define S5PV210_PA_GPIO (0xE0200000) +#define S5PV210_PA_GPIO 0xE0200000 -/* SPI */ -#define S5PV210_PA_SPI0 0xE1300000 -#define S5PV210_PA_SPI1 0xE1400000 +#define S5PV210_PA_SPDIF 0xE1100000 -#define S5PV210_PA_KEYPAD (0xE1600000) +#define S5PV210_PA_SPI0 0xE1300000 +#define S5PV210_PA_SPI1 0xE1400000 -#define S5PV210_PA_IIC0 (0xE1800000) -#define S5PV210_PA_IIC1 (0xFAB00000) -#define S5PV210_PA_IIC2 (0xE1A00000) +#define S5PV210_PA_KEYPAD 0xE1600000 -#define S5PV210_PA_TIMER (0xE2500000) -#define S5P_PA_TIMER S5PV210_PA_TIMER +#define S5PV210_PA_ADC 0xE1700000 -#define S5PV210_PA_SYSTIMER (0xE2600000) +#define S5PV210_PA_IIC0 0xE1800000 +#define S5PV210_PA_IIC1 0xFAB00000 +#define S5PV210_PA_IIC2 0xE1A00000 -#define S5PV210_PA_WATCHDOG (0xE2700000) +#define S5PV210_PA_AC97 0xE2200000 -#define S5PV210_PA_RTC (0xE2800000) -#define S5PV210_PA_UART (0xE2900000) +#define S5PV210_PA_PCM0 0xE2300000 +#define S5PV210_PA_PCM1 0xE1200000 +#define S5PV210_PA_PCM2 0xE2B00000 -#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) -#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) -#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) -#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) +#define S5PV210_PA_TIMER 0xE2500000 +#define S5PV210_PA_SYSTIMER 0xE2600000 +#define S5PV210_PA_WATCHDOG 0xE2700000 +#define S5PV210_PA_RTC 0xE2800000 -#define S5P_SZ_UART SZ_256 +#define S5PV210_PA_UART 0xE2900000 -#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) +#define S5PV210_PA_SROMC 0xE8000000 -#define S5PV210_PA_SROMC (0xE8000000) -#define S5P_PA_SROMC S5PV210_PA_SROMC +#define S5PV210_PA_CFCON 0xE8200000 -#define S5PV210_PA_CFCON (0xE8200000) +#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) -#define S5PV210_PA_MDMA 0xFA200000 -#define S5PV210_PA_PDMA0 0xE0900000 -#define S5PV210_PA_PDMA1 0xE0A00000 +#define S5PV210_PA_HSOTG 0xEC000000 +#define S5PV210_PA_HSPHY 0xEC100000 -#define S5PV210_PA_FB (0xF8000000) +#define S5PV210_PA_IIS0 0xEEE30000 +#define S5PV210_PA_IIS1 0xE2100000 +#define S5PV210_PA_IIS2 0xE2A00000 -#define S5PV210_PA_FIMC0 (0xFB200000) -#define S5PV210_PA_FIMC1 (0xFB300000) -#define S5PV210_PA_FIMC2 (0xFB400000) +#define S5PV210_PA_DMC0 0xF0000000 +#define S5PV210_PA_DMC1 0xF1400000 -#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) +#define S5PV210_PA_VIC0 0xF2000000 +#define S5PV210_PA_VIC1 0xF2100000 +#define S5PV210_PA_VIC2 0xF2200000 +#define S5PV210_PA_VIC3 0xF2300000 -#define S5PV210_PA_HSOTG (0xEC000000) -#define S5PV210_PA_HSPHY (0xEC100000) +#define S5PV210_PA_FB 0xF8000000 -#define S5PV210_PA_VIC0 (0xF2000000) -#define S5PV210_PA_VIC1 (0xF2100000) -#define S5PV210_PA_VIC2 (0xF2200000) -#define S5PV210_PA_VIC3 (0xF2300000) +#define S5PV210_PA_MDMA 0xFA200000 +#define S5PV210_PA_PDMA0 0xE0900000 +#define S5PV210_PA_PDMA1 
0xE0A00000 -#define S5PV210_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5PV210_PA_SDRAM +#define S5PV210_PA_MIPI_CSIS 0xFA600000 -/* S/PDIF */ -#define S5PV210_PA_SPDIF 0xE1100000 +#define S5PV210_PA_FIMC0 0xFB200000 +#define S5PV210_PA_FIMC1 0xFB300000 +#define S5PV210_PA_FIMC2 0xFB400000 -/* I2S */ -#define S5PV210_PA_IIS0 0xEEE30000 -#define S5PV210_PA_IIS1 0xE2100000 -#define S5PV210_PA_IIS2 0xE2A00000 +/* Compatibiltiy Defines */ -/* PCM */ -#define S5PV210_PA_PCM0 0xE2300000 -#define S5PV210_PA_PCM1 0xE1200000 -#define S5PV210_PA_PCM2 0xE2B00000 +#define S3C_PA_FB S5PV210_PA_FB +#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) +#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) +#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) +#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) +#define S3C_PA_IIC S5PV210_PA_IIC0 +#define S3C_PA_IIC1 S5PV210_PA_IIC1 +#define S3C_PA_IIC2 S5PV210_PA_IIC2 +#define S3C_PA_RTC S5PV210_PA_RTC +#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG +#define S3C_PA_WDT S5PV210_PA_WATCHDOG -/* AC97 */ -#define S5PV210_PA_AC97 0xE2200000 +#define S5P_PA_CHIPID S5PV210_PA_CHIPID +#define S5P_PA_FIMC0 S5PV210_PA_FIMC0 +#define S5P_PA_FIMC1 S5PV210_PA_FIMC1 +#define S5P_PA_FIMC2 S5PV210_PA_FIMC2 +#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS +#define S5P_PA_ONENAND S5PC110_PA_ONENAND +#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA +#define S5P_PA_SDRAM S5PV210_PA_SDRAM +#define S5P_PA_SROMC S5PV210_PA_SROMC +#define S5P_PA_SYSCON S5PV210_PA_SYSCON +#define S5P_PA_TIMER S5PV210_PA_TIMER -#define S5PV210_PA_ADC (0xE1700000) +#define SAMSUNG_PA_ADC S5PV210_PA_ADC +#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON +#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD -#define S5PV210_PA_DMC0 (0xF0000000) -#define S5PV210_PA_DMC1 (0xF1400000) +/* UART */ -#define S5PV210_PA_MIPI_CSIS 0xFA600000 +#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) -/* compatibiltiy defines. 
*/ -#define S3C_PA_UART S5PV210_PA_UART -#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) -#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) -#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) -#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) -#define S3C_PA_IIC S5PV210_PA_IIC0 -#define S3C_PA_IIC1 S5PV210_PA_IIC1 -#define S3C_PA_IIC2 S5PV210_PA_IIC2 -#define S3C_PA_FB S5PV210_PA_FB -#define S3C_PA_RTC S5PV210_PA_RTC -#define S3C_PA_WDT S5PV210_PA_WATCHDOG -#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG -#define S5P_PA_FIMC0 S5PV210_PA_FIMC0 -#define S5P_PA_FIMC1 S5PV210_PA_FIMC1 -#define S5P_PA_FIMC2 S5PV210_PA_FIMC2 -#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS +#define S3C_PA_UART S5PV210_PA_UART -#define SAMSUNG_PA_ADC S5PV210_PA_ADC -#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON -#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index 461aa035afc0..557add4fc56c 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c @@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = { static struct regulator_init_data aquila_ldo3_data = { .constraints = { - .name = "VUSB/MIPI_1.1V", + .name = "VUSB+MIPI_1.1V", .min_uV = 1100000, .max_uV = 1100000, .apply_uV = 1, @@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = { static struct regulator_init_data aquila_ldo8_data = { .constraints = { - .name = "VUSB/VADC_3.3V", + .name = "VUSB+VADC_3.3V", .min_uV = 3300000, .max_uV = 3300000, .apply_uV = 1, @@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = { static struct regulator_init_data aquila_ldo9_data = { .constraints = { - .name = "VCC/VCAM_2.8V", + .name = "VCC+VCAM_2.8V", .min_uV = 2800000, .max_uV = 2800000, .apply_uV = 1, @@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_max_voltage1 = 1200000, - .buck1_max_voltage2 = 1200000, - .buck2_max_voltage = 1200000, + .buck1_voltage1 = 1200000, + .buck1_voltage2 = 1200000, + .buck1_voltage3 = 1200000, + .buck1_voltage4 = 1200000, + .buck2_voltage1 = 1200000, + .buck2_voltage2 = 1200000, }; #endif diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index e22d5112fd44..056f5c769b0a 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = { static struct regulator_init_data goni_ldo3_data = { .constraints = { - .name = "VUSB/MIPI_1.1V", + .name = "VUSB+MIPI_1.1V", .min_uV = 1100000, .max_uV = 1100000, .apply_uV = 1, @@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = { static struct regulator_init_data goni_ldo8_data = { .constraints = { - .name = "VUSB/VADC_3.3V", + .name = "VUSB+VADC_3.3V", .min_uV = 3300000, .max_uV = 3300000, .apply_uV = 1, @@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = { static struct regulator_init_data goni_ldo9_data = { .constraints = { - .name = "VCC/VCAM_2.8V", + .name = "VCC+VCAM_2.8V", .min_uV = 2800000, .max_uV = 2800000, .apply_uV = 1, @@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = { 
.buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_max_voltage1 = 1200000, - .buck1_max_voltage2 = 1200000, - .buck2_max_voltage = 1200000, + .buck1_voltage1 = 1200000, + .buck1_voltage2 = 1200000, + .buck1_voltage3 = 1200000, + .buck1_voltage4 = 1200000, + .buck2_voltage1 = 1200000, + .buck2_voltage2 = 1200000, }; #endif diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h index 3060f78e12ab..901657fa7a12 100644 --- a/arch/arm/mach-s5pv310/include/mach/map.h +++ b/arch/arm/mach-s5pv310/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5pv310/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5PV310 - Memory map definitions @@ -23,90 +23,43 @@ #include <plat/map-s5p.h> -#define S5PV310_PA_SYSRAM (0x02025000) +#define S5PV310_PA_SYSRAM 0x02025000 -#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) - -#define S5PC210_PA_ONENAND (0x0C000000) -#define S5P_PA_ONENAND S5PC210_PA_ONENAND - -#define S5PC210_PA_ONENAND_DMA (0x0C600000) -#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA - -#define S5PV310_PA_CHIPID (0x10000000) -#define S5P_PA_CHIPID S5PV310_PA_CHIPID - -#define S5PV310_PA_SYSCON (0x10010000) -#define S5P_PA_SYSCON S5PV310_PA_SYSCON +#define S5PV310_PA_I2S0 0x03830000 +#define S5PV310_PA_I2S1 0xE3100000 +#define S5PV310_PA_I2S2 0xE2A00000 -#define S5PV310_PA_PMU (0x10020000) +#define S5PV310_PA_PCM0 0x03840000 +#define S5PV310_PA_PCM1 0x13980000 +#define S5PV310_PA_PCM2 0x13990000 -#define S5PV310_PA_CMU (0x10030000) - -#define S5PV310_PA_WATCHDOG (0x10060000) -#define S5PV310_PA_RTC (0x10070000) - -#define S5PV310_PA_DMC0 (0x10400000) - -#define S5PV310_PA_COMBINER (0x10448000) - -#define S5PV310_PA_COREPERI (0x10500000) -#define S5PV310_PA_GIC_CPU (0x10500100) -#define S5PV310_PA_TWD (0x10500600) -#define S5PV310_PA_GIC_DIST (0x10501000) -#define S5PV310_PA_L2CC (0x10502000) - -/* DMA */ -#define S5PV310_PA_MDMA 0x10810000 -#define S5PV310_PA_PDMA0 0x12680000 -#define S5PV310_PA_PDMA1 0x12690000 - -#define S5PV310_PA_GPIO1 (0x11400000) -#define S5PV310_PA_GPIO2 (0x11000000) -#define S5PV310_PA_GPIO3 (0x03860000) - -#define S5PV310_PA_MIPI_CSIS0 0x11880000 -#define S5PV310_PA_MIPI_CSIS1 0x11890000 +#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) -#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) +#define S5PC210_PA_ONENAND 0x0C000000 +#define S5PC210_PA_ONENAND_DMA 0x0C600000 -#define S5PV310_PA_SROMC (0x12570000) -#define S5P_PA_SROMC S5PV310_PA_SROMC +#define S5PV310_PA_CHIPID 0x10000000 -/* S/PDIF */ -#define S5PV310_PA_SPDIF 0xE1100000 +#define S5PV310_PA_SYSCON 0x10010000 +#define S5PV310_PA_PMU 0x10020000 +#define S5PV310_PA_CMU 0x10030000 -/* I2S */ -#define S5PV310_PA_I2S0 0x03830000 -#define S5PV310_PA_I2S1 0xE3100000 -#define S5PV310_PA_I2S2 0xE2A00000 +#define S5PV310_PA_WATCHDOG 0x10060000 +#define S5PV310_PA_RTC 0x10070000 -/* PCM */ -#define S5PV310_PA_PCM0 0x03840000 -#define S5PV310_PA_PCM1 0x13980000 -#define S5PV310_PA_PCM2 0x13990000 +#define S5PV310_PA_DMC0 0x10400000 -/* AC97 */ -#define S5PV310_PA_AC97 0x139A0000 +#define S5PV310_PA_COMBINER 0x10448000 -#define S5PV310_PA_UART (0x13800000) +#define S5PV310_PA_COREPERI 0x10500000 +#define S5PV310_PA_GIC_CPU 0x10500100 +#define S5PV310_PA_TWD 0x10500600 +#define S5PV310_PA_GIC_DIST 0x10501000 +#define S5PV310_PA_L2CC 0x10502000 -#define S5P_PA_UART(x) 
(S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) -#define S5P_PA_UART0 S5P_PA_UART(0) -#define S5P_PA_UART1 S5P_PA_UART(1) -#define S5P_PA_UART2 S5P_PA_UART(2) -#define S5P_PA_UART3 S5P_PA_UART(3) -#define S5P_PA_UART4 S5P_PA_UART(4) - -#define S5P_SZ_UART SZ_256 - -#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) - -#define S5PV310_PA_TIMER (0x139D0000) -#define S5P_PA_TIMER S5PV310_PA_TIMER - -#define S5PV310_PA_SDRAM (0x40000000) -#define S5P_PA_SDRAM S5PV310_PA_SDRAM +#define S5PV310_PA_MDMA 0x10810000 +#define S5PV310_PA_PDMA0 0x12680000 +#define S5PV310_PA_PDMA1 0x12690000 #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 #define S5PV310_PA_SYSMMU_SSS 0x10A50000 @@ -125,8 +78,31 @@ #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 -/* compatibiltiy defines. */ -#define S3C_PA_UART S5PV310_PA_UART +#define S5PV310_PA_GPIO1 0x11400000 +#define S5PV310_PA_GPIO2 0x11000000 +#define S5PV310_PA_GPIO3 0x03860000 + +#define S5PV310_PA_MIPI_CSIS0 0x11880000 +#define S5PV310_PA_MIPI_CSIS1 0x11890000 + +#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) + +#define S5PV310_PA_SROMC 0x12570000 + +#define S5PV310_PA_UART 0x13800000 + +#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) + +#define S5PV310_PA_AC97 0x139A0000 + +#define S5PV310_PA_TIMER 0x139D0000 + +#define S5PV310_PA_SDRAM 0x40000000 + +#define S5PV310_PA_SPDIF 0xE1100000 + +/* Compatibiltiy Defines */ + #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) @@ -141,7 +117,28 @@ #define S3C_PA_IIC7 S5PV310_PA_IIC(7) #define S3C_PA_RTC S5PV310_PA_RTC #define S3C_PA_WDT S5PV310_PA_WATCHDOG + +#define S5P_PA_CHIPID S5PV310_PA_CHIPID #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 +#define S5P_PA_ONENAND S5PC210_PA_ONENAND +#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA +#define S5P_PA_SDRAM S5PV310_PA_SDRAM +#define S5P_PA_SROMC S5PV310_PA_SROMC +#define S5P_PA_SYSCON S5PV310_PA_SYSCON +#define S5P_PA_TIMER S5PV310_PA_TIMER + +/* UART */ + +#define S3C_PA_UART S5PV310_PA_UART + +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) +#define S5P_PA_UART4 S5P_PA_UART(4) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 2123b96b5638..4303a86e6e38 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -454,6 +454,7 @@ static void __init ag5evm_init(void) gpio_direction_output(GPIO_PORT217, 0); mdelay(1); gpio_set_value(GPIO_PORT217, 1); + mdelay(100); /* LCD backlight controller */ gpio_request(GPIO_PORT235, NULL); /* RESET */ diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 3cf0951caa2d..81d6536552a9 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void) lcdc_info.clock_source = LCDC_CLK_BUS; lcdc_info.ch[0].interface_type = RGB18; - lcdc_info.ch[0].clock_divider = 2; + lcdc_info.ch[0].clock_divider = 3; lcdc_info.ch[0].flags = 0; lcdc_info.ch[0].lcd_size_cfg.width = 152; lcdc_info.ch[0].lcd_size_cfg.height = 91; diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 
fb4213a4e15a..1657eac5dde2 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = { .lcd_cfg = mackerel_lcdc_modes, .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), .interface_type = RGB24, - .clock_divider = 2, + .clock_divider = 3, .flags = 0, .lcd_size_cfg.width = 152, .lcd_size_cfg.height = 91, diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index ddd4a1b775f0..7e58904c1c8c 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c @@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = { }; enum { MSTP001, - MSTP125, MSTP118, MSTP116, MSTP100, + MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, MSTP219, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, @@ -275,6 +275,10 @@ enum { MSTP001, static struct clk mstp_clks[MSTP_NR] = { [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ + [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ + [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ + [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ + [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ @@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("r_clk", &r_clk), /* DIV6 clocks */ + CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), + CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), + CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), @@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = { /* MSTP32 clocks */ CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ - CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ + CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ + CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ + CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ + CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ - CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ + CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt +++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt @@ -6,13 +6,10 @@ LIST "RWT Setting" EW 0xE6020004, 0xA500 EW 0xE6030004, 0xA500 -DD 0x01001000, 0x01001000 - LIST "GPIO Setting" EB 0xE6051013, 0xA2 LIST "CPG" -ED 
0xE6150080, 0x00000180 ED 0xE61500C0, 0x00000002 WAIT 1, 0xFE40009C @@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 WAIT 1, 0xFE40009C +LIST "SUB/USBClk" +ED 0xE6150080, 0x00000180 + LIST "BSC" ED 0xFEC10000, 0x00E0001B @@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 ED 0xFE40004C, 0x00110209 ED 0xFE400010, 0x00000087 -WAIT 10, 0xFE40009C +WAIT 30, 0xFE40009C ED 0xFE400084, 0x0000003F EB 0xFE500000, 0x00 @@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 WAIT 1, 0xFE40009C -ED 0xE6150354, 0x00000002 +ED 0xFE400354, 0x01AD8002 LIST "SCIF0 - Serial port for earlyprintk" EB 0xE6053098, 0x11 diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt +++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt @@ -6,13 +6,10 @@ LIST "RWT Setting" EW 0xE6020004, 0xA500 EW 0xE6030004, 0xA500 -DD 0x01001000, 0x01001000 - LIST "GPIO Setting" EB 0xE6051013, 0xA2 LIST "CPG" -ED 0xE6150080, 0x00000180 ED 0xE61500C0, 0x00000002 WAIT 1, 0xFE40009C @@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 WAIT 1, 0xFE40009C +LIST "SUB/USBClk" +ED 0xE6150080, 0x00000180 + LIST "BSC" ED 0xFEC10000, 0x00E0001B @@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 ED 0xFE40004C, 0x00110209 ED 0xFE400010, 0x00000087 -WAIT 10, 0xFE40009C +WAIT 30, 0xFE40009C ED 0xFE400084, 0x0000003F EB 0xFE500000, 0x00 @@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 WAIT 1, 0xFE40009C -ED 0xE6150354, 0x00000002 +ED 0xFE400354, 0x01AD8002 LIST "SCIF0 - Serial port for earlyprintk" EB 0xE6053098, 0x11 diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h index cacf17a958cd..53677e464d4b 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear320.h +++ b/arch/arm/mach-spear3xx/include/mach/spear320.h @@ -62,7 +62,7 @@ #define SPEAR320_SMII1_BASE 0xAB000000 #define SPEAR320_SMII1_SIZE 0x01000000 -#define SPEAR320_SOC_CONFIG_BASE 0xB4000000 +#define SPEAR320_SOC_CONFIG_BASE 0xB3000000 #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 /* Interrupt registers offsets and masks */ #define INT_STS_MASK_REG 0x04 diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h index 66ad2760c621..04c779832c78 100644 --- a/arch/arm/mach-tegra/include/mach/kbc.h +++ b/arch/arm/mach-tegra/include/mach/kbc.h @@ -57,5 +57,6 @@ struct tegra_kbc_platform_data { const struct matrix_keymap_data *keymap_data; bool wakeup; + bool use_fn_map; }; #endif diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..f2ce38e085d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) static inline void cache_sync(void) { void __iomem *base = l2x0_base; + +#ifdef CONFIG_ARM_ERRATA_753970 + /* write to an unmmapped register */ + writel_relaxed(0, base + L2X0_DUMMY_REG); +#else writel_relaxed(0, base + L2X0_CACHE_SYNC); +#endif cache_wait(base + L2X0_CACHE_SYNC, 1); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4e..8e3356239136 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -264,6 +264,12 @@ __v7_setup: orreq r10, r10, #1 << 6 @ set bit #6 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif +#ifdef CONFIG_ARM_ERRATA_751472 + cmp r6, #0x30 @ present prior to r3p0 + mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register + orrlt r10, r10, #1 << 11 @ set bit #11 + mcrlt p15, 0, r10, 
c15, c0, 1 @ write diagnostic register +#endif 3: mov r10, #0 #ifdef HARVARD_CACHE diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 459b319a9fad..49d3208793e5 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox) struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) { - struct omap_mbox *mbox; - int ret; + struct omap_mbox *_mbox, *mbox = NULL; + int i, ret; if (!mboxes) return ERR_PTR(-EINVAL); - for (mbox = *mboxes; mbox; mbox++) - if (!strcmp(mbox->name, name)) + for (i = 0; (_mbox = mboxes[i]); i++) { + if (!strcmp(_mbox->name, name)) { + mbox = _mbox; break; + } + } if (!mbox) return ERR_PTR(-ENOENT); diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c index 6a7342886171..afaf87fdb93e 100644 --- a/arch/arm/plat-s5p/dev-uart.c +++ b/arch/arm/plat-s5p/dev-uart.c @@ -28,7 +28,7 @@ static struct resource s5p_uart0_resource[] = { [0] = { .start = S5P_PA_UART0, - .end = S5P_PA_UART0 + S5P_SZ_UART, + .end = S5P_PA_UART0 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = { static struct resource s5p_uart1_resource[] = { [0] = { .start = S5P_PA_UART1, - .end = S5P_PA_UART1 + S5P_SZ_UART, + .end = S5P_PA_UART1 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = { static struct resource s5p_uart2_resource[] = { [0] = { .start = S5P_PA_UART2, - .end = S5P_PA_UART2 + S5P_SZ_UART, + .end = S5P_PA_UART2 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 [0] = { .start = S5P_PA_UART3, - .end = S5P_PA_UART3 + S5P_SZ_UART, + .end = S5P_PA_UART3 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 [0] = { .start = S5P_PA_UART4, - .end = S5P_PA_UART4 + S5P_SZ_UART, + .end = S5P_PA_UART4 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 [0] = { .start = S5P_PA_UART5, - .end = S5P_PA_UART5 + S5P_SZ_UART, + .end = S5P_PA_UART5 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c index 236ef8427d7d..3e4bd8147bf4 100644 --- a/arch/arm/plat-samsung/dev-ts.c +++ b/arch/arm/plat-samsung/dev-ts.c @@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) s3c_device_ts.dev.platform_data = npd; } -EXPORT_SYMBOL(s3c24xx_ts_set_platdata); diff --git a/arch/arm/plat-samsung/dev-uart.c b/arch/arm/plat-samsung/dev-uart.c index 3776cd952450..5928105490fa 100644 --- a/arch/arm/plat-samsung/dev-uart.c +++ b/arch/arm/plat-samsung/dev-uart.c @@ -15,6 +15,8 @@ #include <linux/kernel.h> #include <linux/platform_device.h> +#include <plat/devs.h> + /* uart devices */ static struct platform_device s3c24xx_uart_device0 = { diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 99ba6789cc97..6dd455bafdfd 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h @@ -24,10 +24,10 @@ static inline void putc(int c) { void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; - while (readl(base + UART01x_FR) 
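The "- 1" added to the plat-s5p dev-uart resources just above is the classic struct resource off-by-one: the .end field is the address of the last byte covered, not one past it, so a window of S5P_SZ_UART bytes must end at start + S5P_SZ_UART - 1. A minimal sketch of the corrected shape, with made-up DEMO_* names standing in for the real base and size:

#include <linux/ioport.h>

#define DEMO_BASE       0x13800000      /* hypothetical UART base */
#define DEMO_SIZE       0x100           /* 256-byte register window */

static struct resource demo_uart_res = {
        .start  = DEMO_BASE,
        .end    = DEMO_BASE + DEMO_SIZE - 1,    /* last byte, inclusive */
        .flags  = IORESOURCE_MEM,
};

Without the "- 1" the resource claims one byte of whatever is mapped next, and a later request_mem_region() for the neighbouring device can start failing for no obvious reason.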
& UART01x_FR_TXFF) + while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF) barrier(); - writel(c, base + UART01x_DR); + writel_relaxed(c, base + UART01x_DR); } static inline void flush(void) diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h index 09e9372aea21..8c8b24d07046 100644 --- a/arch/arm/plat-spear/include/plat/vmalloc.h +++ b/arch/arm/plat-spear/include/plat/vmalloc.h @@ -14,6 +14,6 @@ #ifndef __PLAT_VMALLOC_H #define __PLAT_VMALLOC_H -#define VMALLOC_END 0xF0000000 +#define VMALLOC_END 0xF0000000UL #endif /* __PLAT_VMALLOC_H */ diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S index 250f4d4b9436..06a5e674401f 100644 --- a/arch/blackfin/lib/outs.S +++ b/arch/blackfin/lib/outs.S @@ -13,6 +13,8 @@ .align 2 ENTRY(_outsl) + CC = R2 == 0; + IF CC JUMP 1f; P0 = R0; /* P0 = port */ P1 = R1; /* P1 = address */ P2 = R2; /* P2 = count */ @@ -20,10 +22,12 @@ ENTRY(_outsl) LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; .Llong_loop_s: R0 = [P1++]; .Llong_loop_e: [P0] = R0; - RTS; +1: RTS; ENDPROC(_outsl) ENTRY(_outsw) + CC = R2 == 0; + IF CC JUMP 1f; P0 = R0; /* P0 = port */ P1 = R1; /* P1 = address */ P2 = R2; /* P2 = count */ @@ -31,10 +35,12 @@ ENTRY(_outsw) LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; .Lword_loop_s: R0 = W[P1++]; .Lword_loop_e: W[P0] = R0; - RTS; +1: RTS; ENDPROC(_outsw) ENTRY(_outsb) + CC = R2 == 0; + IF CC JUMP 1f; P0 = R0; /* P0 = port */ P1 = R1; /* P1 = address */ P2 = R2; /* P2 = count */ @@ -42,10 +48,12 @@ ENTRY(_outsb) LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; .Lbyte_loop_s: R0 = B[P1++]; .Lbyte_loop_e: B[P0] = R0; - RTS; +1: RTS; ENDPROC(_outsb) ENTRY(_outsw_8) + CC = R2 == 0; + IF CC JUMP 1f; P0 = R0; /* P0 = port */ P1 = R1; /* P1 = address */ P2 = R2; /* P2 = count */ @@ -56,5 +64,5 @@ ENTRY(_outsw_8) R0 = R0 << 8; R0 = R0 + R1; .Lword8_loop_e: W[P0] = R0; - RTS; +1: RTS; ENDPROC(_outsw_8) diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S index 790c767ca95a..ab4a925a443e 100644 --- a/arch/blackfin/mach-common/cache.S +++ b/arch/blackfin/mach-common/cache.S @@ -58,6 +58,8 @@ 1: .ifeqs "\flushins", BROK_FLUSH_INST \flushins [P0++]; + nop; + nop; 2: nop; .else 2: \flushins [P0++]; diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 442218980db0..c49be845f96a 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -72,11 +72,6 @@ SECTIONS INIT_TEXT_SECTION(PAGE_SIZE) .init.data : { INIT_DATA } .init.setup : { INIT_SETUP(16) } -#ifdef CONFIG_ETRAX_ARCH_V32 - __start___param = .; - __param : { *(__param) } - __stop___param = .; -#endif .initcall.init : { INIT_CALLS } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f5ecc0566bc2..d88983516e26 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,6 +4,7 @@ config MIPS select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_OPROFILE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_ARCH_KGDB @@ -208,6 +209,7 @@ config MACH_JZ4740 select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK select HAVE_PWM + select HAVE_CLK config LASAT bool "LASAT Networks platforms" @@ -333,6 +335,8 @@ config PNX8550_STB810 config PMC_MSP bool "PMC-Sierra MSP chipsets" depends on EXPERIMENTAL + select CEVT_R4K + select CSRC_R4K select DMA_NONCOHERENT select SWAP_IO_SPACE select NO_EXCEPT_FILL diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 6398fa95905c..40b84b991191 
100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c @@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert); static void mtx1_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void mtx1_power_off(void) diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42add697..956f946218c5 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c @@ -28,6 +28,8 @@ #include <linux/mtd/physmap.h> #include <mtd/mtd-abi.h> +#include <asm/mach-au1x00/au1xxx_eth.h> + static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { &mtx1_mtd, }; +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { + .phy_search_highest_addr = 1, + .phy1_search_mac0 = 1, +}; + static int __init mtx1_register_devices(void) { int rc; + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); + rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index b43c918925d3..80c521e5290d 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c @@ -36,8 +36,8 @@ static void xxs1500_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void xxs1500_power_off(void) diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h index e00007cf8162..d0c77496c728 100644 --- a/arch/mips/include/asm/perf_event.h +++ b/arch/mips/include/asm/perf_event.h @@ -11,15 +11,5 @@ #ifndef __MIPS_PERF_EVENT_H__ #define __MIPS_PERF_EVENT_H__ - -/* - * MIPS performance counters do not raise NMI upon overflow, a regular - * interrupt will be signaled. Hence we can do the pending perf event - * work at the tail of the irq handler. - */ -static inline void -set_perf_event_pending(void) -{ -} - +/* Leave it empty here. The file is required by linux/perf_event.h */ #endif /* __MIPS_PERF_EVENT_H__ */ diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -17,29 +17,13 @@ #include <asm/cacheflush.h> #include <asm/uasm.h> -/* - * If the Instruction Pointer is in module space (0xc0000000), return true; - * otherwise, it is in kernel space (0x80000000), return false. - * - * FIXME: This will not work when the kernel space and module space are the - * same. If they are the same, we need to modify scripts/recordmcount.pl, - * ftrace_make_nop/call() and the other related parts to ensure the - * enabling/disabling of the calling site to _mcount is right for both kernel - * and module. 
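Some background for the ftrace rework in this hunk: kernel code is built without -mlong-calls, so each call site is a single "jal _mcount", while module code is built with -mlong-calls and gets the lui/addiu/jalr sequence described in the comment block that follows. The reason is reach: jal only encodes the low 28 bits of the target, so it cannot jump from module space into kernel text. A stand-alone sketch of the encoding, reusing the JAL/ADDR_MASK values shown above (the sample address is made up):

#include <stdio.h>

#define JAL       0x0c000000u   /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffffu   /* op_code|addr : 31...26|25....0 */

/* Build a jal instruction word the same way INSN_JAL() does. */
static unsigned int insn_jal(unsigned long addr)
{
        /*
         * Only bits 27..2 of the target fit in the instruction; the CPU
         * supplies the top 4 bits from the PC, so the target must lie in
         * the same 256 MB segment as the call site.
         */
        return JAL | ((addr >> 2) & ADDR_MASK);
}

int main(void)
{
        printf("jal 0x80123458 -> 0x%08x\n", insn_jal(0x80123458ul));
        return 0;
}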
- */ - -static inline int in_module(unsigned long ip) -{ - return ip & 0x40000000; -} +#include <asm-generic/sections.h> #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ -#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) @@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } +/* + * The details about the calling site of mcount on MIPS + * + * 1. For kernel: + * + * move at, ra + * jal _mcount --> nop + * + * 2. For modules: + * + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + * 2.2 For the Other situations + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * jalr v1 + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions + */ + +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { @@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; /* - * We have compiled module with -mlong-calls, but compiled the kernel - * without it, we need to cope with them respectively. + * If ip is in kernel space, no long call, otherwise, long call is + * needed. */ - if (in_module(ip)) { -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * move $12, ra_address - * jalr v1 - * sub sp, sp, 8 - * 1: offset = 5 instructions - */ - new = INSN_B_1F_5; -#else - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * jalr v1 - * nop | move $12, ra_address | sub sp, sp, 8 - * 1: offset = 4 instructions - */ - new = INSN_B_1F_4; -#endif - } else { - /* - * move at, ra - * jal _mcount --> nop - */ - new = INSN_NOP; - } + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + return ftrace_modify_code(ip, new); } @@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - /* ip, module: 0xc0000000, kernel: 0x80000000 */ - new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; + new = in_kernel_space(ip) ? 
insn_jal_ftrace_caller : + insn_lui_v1_hi16_mcount; return ftrace_modify_code(ip, new); } @@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_addr(unsigned long self_addr, - unsigned long parent, - unsigned long parent_addr, - unsigned long fp) +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { - unsigned long sp, ip, ra; + unsigned long sp, ip, tmp; unsigned int code; int faulted; /* - * For module, move the ip from calling site of mcount to the - * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for - * kernel, move to the instruction "move ra, at"(offset is 12) + * For module, move the ip from the return address after the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for + * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_module(self_addr) ? 20 : 12); + ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} * ra, offset(sp)" instruction */ do { - ip -= 4; - /* get the code at "ip": code = *(unsigned int *)ip; */ safe_load_code(code, ip, faulted); @@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * store the ra on the stack */ if ((code & S_R_SP) != S_R_SP) - return parent_addr; + return parent_ra_addr; - } while (((code & S_RA_SP) != S_RA_SP)); + /* Move to the next instruction */ + ip -= 4; + } while ((code & S_RA_SP) != S_RA_SP); sp = fp + (code & OFFSET_MASK); - /* ra = *(unsigned long *)sp; */ - safe_load_stack(ra, sp, faulted); + /* tmp = *(unsigned long *)sp; */ + safe_load_stack(tmp, sp, faulted); if (unlikely(faulted)) return 0; - if (ra == parent) + if (tmp == old_parent_ra) return sp; return 0; } @@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * Hook the return address and push it in the stack of return addrs * in current thread info. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { - unsigned long old; + unsigned long old_parent_ra; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; - int faulted; + int faulted, insns; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; /* - * "parent" is the stack address saved the return address of the caller - * of _mcount. + * "parent_ra_addr" is the stack address saved the return address of + * the caller of _mcount. * * if the gcc < 4.5, a leaf function does not save the return address * in the stack address, so, we "emulate" one in _mcount's stack space, @@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, * do it in ftrace_graph_caller of mcount.S. 
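The rewritten ftrace_get_parent_ra_addr() above walks backwards from the mcount call site looking for the prologue instruction that spills ra to the stack, "s{d,w} ra, offset(sp)", and it recognises that store by its upper halfword. A small user-space sketch of why that works for the 32-bit store, built from the opcode and register numbers (the offset value is arbitrary):

#include <stdio.h>

/*
 * MIPS32 "sw ra, offset(sp)": opcode sw = 0x2b, base register $sp = 29,
 * target register $ra = 31, so the upper halfword is always 0xafbf --
 * the bit pattern the S_RA_SP check in the hunk above keys on.
 */
static unsigned int sw_ra_sp(unsigned short offset)
{
        return (0x2bu << 26) | (29u << 21) | (31u << 16) | offset;
}

int main(void)
{
        unsigned int code = sw_ra_sp(28);

        printf("sw ra, 28(sp) -> 0x%08x\n", code);
        printf("matches 0xafbfxxxx: %s\n",
               (code & 0xafbf0000u) == 0xafbf0000u ? "yes" : "no");
        return 0;
}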
*/ - /* old = *parent; */ - safe_load_stack(old, parent, faulted); + /* old_parent_ra = *parent_ra_addr; */ + safe_load_stack(old_parent_ra, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS - parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, - (unsigned long)parent, fp); + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, + old_parent_ra, (unsigned long)parent_ra_addr, fp); /* * If fails when getting the stack address of the non-leaf function's * ra, stop function graph tracer and return */ - if (parent == 0) + if (parent_ra_addr == 0) goto out; #endif - /* *parent = return_hooker; */ - safe_store_stack(return_hooker, parent, faulted); + /* *parent_ra_addr = return_hooker; */ + safe_store_stack(return_hooker, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == - -EBUSY) { - *parent = old; + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) + == -EBUSY) { + *parent_ra_addr = old_parent_ra; return; } - trace.func = self_addr; + /* + * Get the recorded ip of the current mcount calling site in the + * __mcount_loc section, which will be used to filter the function + * entries configured through the tracing/set_graph_function interface. + */ + + insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; - *parent = old; + *parent_ra_addr = old_parent_ra; } return; out: diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, return ret; } -static int mipspmu_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* To look for a free counter for this event. */ - idx = mipspmu->alloc_counter(cpuc, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then - * make sure it is disabled. - */ - event->hw.idx = idx; - mipspmu->disable_event(idx); - cpuc->events[idx] = event; - - /* Set the period for the event. */ - mipspmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - mipspmu->enable_event(hwc, idx); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - return err; -} - static void mipspmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) @@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, unsigned long flags; int shift = 64 - TOTAL_BITS; s64 prev_raw_count, new_raw_count; - s64 delta; + u64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); @@ -231,32 +196,90 @@ again: return; } -static void mipspmu_disable(struct perf_event *event) +static void mipspmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. 
*/ + mipspmu->enable_event(hwc, hwc->idx); +} + +static void mipspmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + mipspmu->disable_event(hwc->idx); + barrier(); + mipspmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int mipspmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int idx; + int err = 0; + perf_pmu_disable(event->pmu); - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + /* To look for a free counter for this event. */ + idx = mipspmu->alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } - /* We are working on a local event. */ + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; mipspmu->disable_event(idx); + cpuc->events[idx] = event; - barrier(); - - mipspmu_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + mipspmu_start(event, PERF_EF_RELOAD); + /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; } -static void mipspmu_unthrottle(struct perf_event *event) +static void mipspmu_del(struct perf_event *event, int flags) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; - mipspmu->enable_event(hwc, hwc->idx); + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + mipspmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); } static void mipspmu_read(struct perf_event *event) @@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) mipspmu_event_update(event, hwc, hwc->idx); } -static struct pmu pmu = { - .enable = mipspmu_enable, - .disable = mipspmu_disable, - .unthrottle = mipspmu_unthrottle, - .read = mipspmu_read, -}; +static void mipspmu_enable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->start(); +} + +static void mipspmu_disable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->stop(); +} static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmu_reserve_mutex); @@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) perf_irq = save_perf_irq; } +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. 
+ */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu->num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int mipspmu_event_init(struct perf_event *event) +{ + int err = 0; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (!mipspmu || event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { + atomic_dec(&active_events); + return -ENOSPC; + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static struct pmu pmu = { + .pmu_enable = mipspmu_enable, + .pmu_disable = mipspmu_disable, + .event_init = mipspmu_event_init, + .add = mipspmu_add, + .del = mipspmu_del, + .start = mipspmu_start, + .stop = mipspmu_stop, + .read = mipspmu_read, +}; + static inline unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) { @@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, { struct hw_perf_event fake_hwc = event->hw; - if (event->pmu && event->pmu != &pmu) - return 0; + /* Allow mixed event group. So return 1 to pass validation. */ + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; } @@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) return 0; } -/* - * mipsxx/rm9000/loongson2 have different performance counters, they have - * specific low-level init routines. - */ -static void reset_counters(void *arg); -static int __hw_perf_event_init(struct perf_event *event); - -static void hw_perf_event_destroy(struct perf_event *event) -{ - if (atomic_dec_and_mutex_lock(&active_events, - &pmu_reserve_mutex)) { - /* - * We must not call the destroy function with interrupts - * disabled. - */ - on_each_cpu(reset_counters, - (void *)(long)mipspmu->num_counters, 1); - mipspmu_free_irq(); - mutex_unlock(&pmu_reserve_mutex); - } -} - -const struct pmu *hw_perf_event_init(struct perf_event *event) -{ - int err = 0; - - if (!mipspmu || event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return ERR_PTR(-ENODEV); - - if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { - atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); - } - - mutex_lock(&pmu_reserve_mutex); - if (atomic_read(&active_events) == 0) - err = mipspmu_get_irq(); - - if (!err) - atomic_inc(&active_events); - mutex_unlock(&pmu_reserve_mutex); - } - - if (err) - return ERR_PTR(err); - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err ? ERR_PTR(err) : &pmu; -} - -void hw_perf_enable(void) -{ - if (mipspmu) - mipspmu->start(); -} - -void hw_perf_disable(void) -{ - if (mipspmu) - mipspmu->stop(); -} - /* This is needed by specific irq handlers in perf_event_*.c */ static void handle_associated_event(struct cpu_hw_events *cpuc, @@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, #include "perf_event_mipsxx.c" /* Callchain handling code. 
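The struct pmu initialiser above is the core of this conversion: the old enable/disable/unthrottle callbacks are gone, and the perf core now drives a backend through pmu_enable/pmu_disable plus per-event event_init, add, del, start, stop and read hooks. A bare-bones kernel-context sketch of that wiring; everything named example_* is invented, and the stub bodies only show the expected signatures, not a working PMU:

#include <linux/errno.h>
#include <linux/perf_event.h>

static void example_pmu_enable(struct pmu *pmu) { }     /* start all counters */
static void example_pmu_disable(struct pmu *pmu) { }    /* stop all counters */

static int example_event_init(struct perf_event *event)
{
        /* Claim only the event types this PMU understands. */
        return event->attr.type == PERF_TYPE_RAW ? 0 : -ENOENT;
}

static int example_add(struct perf_event *event, int flags) { return 0; }
static void example_del(struct perf_event *event, int flags) { }
static void example_start(struct perf_event *event, int flags) { }
static void example_stop(struct perf_event *event, int flags) { }
static void example_read(struct perf_event *event) { }

static struct pmu example_pmu = {
        .pmu_enable     = example_pmu_enable,
        .pmu_disable    = example_pmu_disable,
        .event_init     = example_event_init,
        .add            = example_add,          /* bind event to a counter */
        .del            = example_del,          /* release the counter */
        .start          = example_start,        /* program period, enable */
        .stop           = example_stop,         /* disable, sync the count */
        .read           = example_read,
};

/* Registered once at init, as perf_event_mipsxx.c does further down:
 *      perf_pmu_register(&example_pmu, "example", PERF_TYPE_RAW);
 */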
*/ -static inline void -callchain_store(struct perf_callchain_entry *entry, - u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} /* * Leave userspace callchain empty for now. When we find a way to trace * the user stack callchains, we add here. */ -static void -perf_callchain_user(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) { } @@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { - callchain_store(entry, addr); + perf_callchain_store(entry, addr); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; } } } -static void -perf_callchain_kernel(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; - callchain_store(entry, PERF_CONTEXT_KERNEL); if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(current); @@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, return; } do { - callchain_store(entry, pc); + perf_callchain_store(entry, pc); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else - callchain_store(entry, PERF_CONTEXT_KERNEL); save_raw_perf_callchain(entry, sp); #endif } - -static void -perf_do_callchain(struct pt_regs *regs, - struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || !current->pid) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) { - perf_callchain_kernel(regs, entry); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - if (regs) - perf_callchain_user(regs, entry); -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); - -struct perf_callchain_entry * -perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - perf_do_callchain(regs, entry); - return entry; -} diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..d9a7db78ed62 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) * interrupt, not NMI. */ if (handled == IRQ_HANDLED) - perf_event_do_pending(); + irq_work_run(); #ifdef CONFIG_MIPS_MT_SMP read_unlock(&pmuint_rwlock); @@ -1045,6 +1045,8 @@ init_hw_perf_events(void) "CPU, irq %d%s\n", mipspmu->name, counters, irq, irq < 0 ? 
" (share with timer interrupt)" : ""); + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + return 0; } early_initcall(init_hw_perf_events); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) static int protected_restore_fp_context(struct sigcontext __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) static int protected_restore_fp_context32(struct sigcontext32 __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) */ static struct task_struct *cpu_idle_thread[NR_CPUS]; +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + int __cpuinit __cpu_up(unsigned int cpu) { struct task_struct *idle; @@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) * Linux can schedule processes on this slave. 
*/ if (!cpu_idle_thread[cpu]) { - idle = fork_idle(cpu); - cpu_idle_thread[cpu] = idle; + /* + * Schedule work item to avoid forking user task + * Ported from arch/x86/kernel/smpboot.c + */ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + idle = cpu_idle_thread[cpu] = c_idle.idle; if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -383,12 +383,11 @@ save_static_function(sys_sysmips); static int __used noinline _sys_sysmips(nabi_no_regargs struct pt_regs regs) { - long cmd, arg1, arg2, arg3; + long cmd, arg1, arg2; cmd = regs.regs[4]; arg1 = regs.regs[5]; arg2 = regs.regs[6]; - arg3 = regs.regs[7]; switch (cmd) { case MIPS_ATOMIC_SET: @@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) if (arg1 & 2) set_thread_flag(TIF_LOGADE); else - clear_thread_flag(TIF_FIXADE); + clear_thread_flag(TIF_LOGADE); return 0; diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..ab52b7cf3b6b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -148,9 +148,9 @@ struct { spinlock_t tc_list_lock; struct list_head tc_list; /* Thread contexts */ } vpecontrol = { - .vpe_list_lock = SPIN_LOCK_UNLOCKED, + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), - .tc_list_lock = SPIN_LOCK_UNLOCKED, + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 6e1b77fec7ea..aca93eed8779 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -1,6 +1,7 @@ +if MACH_LOONGSON + choice prompt "Machine Type" - depends on MACH_LOONGSON config LEMOTE_FULOONG2E bool "Lemote Fuloong(2e) mini-PC" @@ -87,3 +88,5 @@ config LOONGSON_UART_BASE config LOONGSON_MC146818 bool default n + +endif # MACH_LOONGSON diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c index 1a06defc4f7f..353e1d2e41a5 100644 --- a/arch/mips/loongson/common/cmdline.c +++ b/arch/mips/loongson/common/cmdline.c @@ -44,10 +44,5 @@ void __init prom_init_cmdline(void) strcat(arcs_cmdline, " "); } - if ((strstr(arcs_cmdline, "console=")) == NULL) - strcat(arcs_cmdline, " console=ttyS0,115200"); - if ((strstr(arcs_cmdline, "root=")) == NULL) - strcat(arcs_cmdline, " root=/dev/hda1"); - prom_init_machtype(); } diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 81fbe6b73f91..2efd5d9dee27 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c @@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void) void __init prom_init_machtype(void) { - char *p, str[MACHTYPE_LEN]; + char *p, str[MACHTYPE_LEN + 1]; int machtype = MACH_LEMOTE_FL2E; mips_machtype = LOONGSON_MACHTYPE; @@ -53,6 +53,7 @@ void __init prom_init_machtype(void) } p += strlen("machtype="); strncpy(str, p, MACHTYPE_LEN); + str[MACHTYPE_LEN] = '\0'; p = strstr(str, " "); if (p) *p = '\0'; diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 2701d9500959..2a7d43f4f161 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h @@ -70,7 
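The __cpu_up() change that ends just above borrows the x86 trick of forking the idle thread from a workqueue, so the idle task is created from a kernel worker rather than from whichever user process initiated cpu_up(). The building blocks are an on-stack work item paired with a completion; a kernel-context sketch of the same pattern with invented names (the payload merely stands in for fork_idle()):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct run_in_worker {
        struct work_struct work;
        struct completion done;
        int result;
};

static void run_in_worker_fn(struct work_struct *work)
{
        struct run_in_worker *r = container_of(work, struct run_in_worker, work);

        r->result = 42;                 /* stand-in for fork_idle(cpu) */
        complete(&r->done);             /* wake the waiter */
}

static int run_synchronously(void)
{
        struct run_in_worker r = {
                .done = COMPLETION_INITIALIZER_ONSTACK(r.done),
        };

        INIT_WORK_ONSTACK(&r.work, run_in_worker_fn);
        schedule_work(&r.work);
        wait_for_completion(&r.done);   /* block until the worker has run */
        return r.result;
}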
+70,7 @@ #define COMPXSP \ - unsigned xm; int xe; int xs; int xc + unsigned xm; int xe; int xs __maybe_unused; int xc #define COMPYSP \ unsigned ym; int ye; int ys; int yc @@ -104,7 +104,7 @@ #define COMPXDP \ -u64 xm; int xe; int xs; int xc +u64 xm; int xe; int xs __maybe_unused; int xc #define COMPYDP \ u64 ym; int ye; int ys; int yc diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 2efcbd24c82f..279599e9a779 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr) void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; - unsigned long lastpfn; + unsigned long lastpfn __maybe_unused; pagetable_init(); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 083d3412d0bc..04f9e17db9d0 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -109,6 +109,8 @@ static bool scratchpad_available(void) static int scratchpad_offset(int i) { BUG(); + /* Really unreachable, but evidently some GCC want this. */ + return 0; } #endif /* diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index b7c03d80c88c..68798f869c0f 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c @@ -308,7 +308,7 @@ static struct resource pci_mem_resource = { * RETURNS: PCIBIOS_SUCCESSFUL - success * ****************************************************************************/ -static int bpci_interrupt(int irq, void *dev_id) +static irqreturn_t bpci_interrupt(int irq, void *dev_id) { struct msp_pci_regs *preg = (void *)PCI_BASE_REG; unsigned int stat = preg->if_status; @@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id) /* write to clear all asserted interrupts */ preg->if_status = stat; - return PCIBIOS_SUCCESSFUL; + return IRQ_HANDLED; } /***************************************************************************** diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index c139988bb85d..8d798497c614 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig @@ -4,15 +4,11 @@ choice config PMC_MSP4200_EVAL bool "PMC-Sierra MSP4200 Eval Board" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI config PMC_MSP4200_GW bool "PMC-Sierra MSP4200 VoIP Gateway" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index cca64e15f57f..01df84ce31e2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c @@ -81,7 +81,7 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_rate/2; } -unsigned int __init get_c0_compare_int(void) +unsigned int __cpuinit get_c0_compare_int(void) { return MSP_INT_VPE0_TIMER; } diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 92d2f9298e38..9d773a639513 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m, * Atomically reads the value of @v. Note that the guaranteed * useful range of an atomic_t is only 24 bits. 
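The mn10300 atomic_read() change immediately below wraps the counter access in ACCESS_ONCE(). For reference, ACCESS_ONCE() in include/linux/compiler.h boils down to a volatile cast, which forces the compiler to emit exactly one real load per use instead of reusing a stale register copy:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

Without it, a polling loop such as "while (!atomic_read(&flag)) cpu_relax();" may legally be compiled into a single load followed by an endless loop, because nothing tells the compiler the value can change behind its back.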
*/ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (ACCESS_ONCE((v)->counter)) /** * atomic_set - set atomic variable diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 679dee0bbd08..3d6e60dad9d9 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_check(x, ptr, size) \ ({ \ + const __typeof__(ptr) __guc_ptr = (ptr); \ int _e; \ - if (likely(__access_ok((unsigned long) (ptr), (size)))) \ - _e = __get_user_nocheck((x), (ptr), (size)); \ + if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ + _e = __get_user_nocheck((x), __guc_ptr, (size)); \ else { \ _e = -EFAULT; \ (x) = (__typeof__(x))0; \ diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c index a8933a60b2d4..a6b63dde603d 100644 --- a/arch/mn10300/mm/cache-inv-icache.c +++ b/arch/mn10300/mm/cache-inv-icache.c @@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end) /* invalidate the icache coverage on that region */ mn10300_local_icache_inv_range2(addr + off, size); - smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); } /** @@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end) * directly */ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; mn10300_icache_inv_range(start_page, end); - smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); + smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); if (start_page == start) goto done; end = start_page; diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 380d48bacd16..26b8c807f8f1 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -33,9 +33,25 @@ // //---------------------------------------------------------------------------- #include <linux/cache.h> +#include <linux/threads.h> #include <asm/types.h> #include <asm/mmu.h> +/* + * We only have to have statically allocated lppaca structs on + * legacy iSeries, which supports at most 64 cpus. + */ +#ifdef CONFIG_PPC_ISERIES +#if NR_CPUS < 64 +#define NR_LPPACAS NR_CPUS +#else +#define NR_LPPACAS 64 +#endif +#else /* not iSeries */ +#define NR_LPPACAS 1 +#endif + + /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k * alignment is sufficient to prevent this */ struct lppaca { diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 991d5998d6be..fe56a23e1ff0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -240,6 +240,12 @@ struct machdep_calls { * claims to support kexec. */ int (*machine_kexec_prepare)(struct kimage *image); + + /* Called to perform the _real_ kexec. + * Do NOT allocate memory or fail here. We are past the point of + * no return. 
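The mn10300 __get_user_check() fix above latches the pointer argument into a local (__guc_ptr) so the macro argument is evaluated only once; previously a pointer expression with side effects would have been evaluated by both the access check and the actual fetch. A stand-alone sketch of that hazard, with check_range() and fetch() invented as stand-ins:

#include <stdio.h>

static int check_range(const int *p) { return p != NULL; }
static int fetch(const int *p) { return *p; }

/* Argument p is evaluated twice: once by the check, once by the fetch. */
#define GET_TWICE(x, p) (check_range(p) ? ((x) = fetch(p), 0) : -1)

/* Argument p is evaluated exactly once, into a local copy. */
#define GET_ONCE(x, p)                                  \
({                                                      \
        typeof(p) __p = (p);                            \
        check_range(__p) ? ((x) = fetch(__p), 0) : -1;  \
})

int main(void)
{
        int data[2] = { 10, 20 };
        int *q = data, v;

        GET_TWICE(v, q++);      /* q advances twice and v reads data[1] */
        printf("GET_TWICE: q - data = %td, v = %d\n", q - data, v);

        q = data;
        GET_ONCE(v, q++);       /* q advances once and v reads data[0] */
        printf("GET_ONCE:  q - data = %td, v = %d\n", q - data, v);
        return 0;
}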
+ */ + void (*machine_kexec)(struct kimage *image); #endif /* CONFIG_KEXEC */ #ifdef CONFIG_SUSPEND diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 49a170af8145..a5f8672eeff3 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image) save_ftrace_enabled = __ftrace_enabled_save(); - default_machine_kexec(image); + if (ppc_md.machine_kexec) + ppc_md.machine_kexec(image); + else + default_machine_kexec(image); __ftrace_enabled_restore(save_ftrace_enabled); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ebf9846f3c3b..f4adf89d7614 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -27,20 +27,6 @@ extern unsigned long __toc_start; #ifdef CONFIG_PPC_BOOK3S /* - * We only have to have statically allocated lppaca structs on - * legacy iSeries, which supports at most 64 cpus. - */ -#ifdef CONFIG_PPC_ISERIES -#if NR_CPUS < 64 -#define NR_LPPACAS NR_CPUS -#else -#define NR_LPPACAS 64 -#endif -#else /* not iSeries */ -#define NR_LPPACAS 1 -#endif - -/* * The structure which the hypervisor knows about - this structure * should not cross a page boundary. The vpa_init/register_vpa call * is now known to fail if the lppaca structure crosses a page diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7a1d5cb76932..8303a6c65ef7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) prime_debug_regs(new_thread); } #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ +#ifndef CONFIG_HAVE_HW_BREAKPOINT static void set_debug_reg_defaults(struct thread_struct *thread) { if (thread->dabr) { @@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) set_dabr(0); } } +#endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ int set_dabr(unsigned long dabr) @@ -670,11 +672,11 @@ void flush_thread(void) { discard_lazy_cpu_state(); -#ifdef CONFIG_HAVE_HW_BREAKPOINTS +#ifdef CONFIG_HAVE_HW_BREAKPOINT flush_ptrace_hw_breakpoint(current); -#else /* CONFIG_HAVE_HW_BREAKPOINTS */ +#else /* CONFIG_HAVE_HW_BREAKPOINT */ set_debug_reg_defaults(¤t->thread); -#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ } void diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index fd4812329570..0dc95c0aa3be 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1516,7 +1516,8 @@ int start_topology_update(void) { int rc = 0; - if (firmware_has_feature(FW_FEATURE_VPHN) && + /* Disabled until races with load balancing are fixed */ + if (0 && firmware_has_feature(FW_FEATURE_VPHN) && get_lppaca()->shared_proc) { vphn_enabled = 1; setup_cpu_associativity_change_counters(); diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 1ec06576f619..c14d09f614f3 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); * neesd to be flushed. This function will either perform the flush * immediately or will batch it up if the current CPU has an active * batch on it. - * - * Must be called from within some kind of spinlock/non-preempt region... 
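The tlb_hash64 hunk that starts just below switches hpte_need_flush() from __get_cpu_var() to get_cpu_var(). The difference is that get_cpu_var() disables preemption around the per-CPU access, which is why the old "must be called from within some kind of spinlock/non-preempt region" comment can go away, and why every return path in the function now needs a matching put_cpu_var(). A kernel-context sketch of the pattern, with an invented batch structure and helpers:

#include <linux/kernel.h>
#include <linux/percpu.h>

struct demo_batch {
        int n;
        int entries[16];
};
static DEFINE_PER_CPU(struct demo_batch, demo_batch);

static void demo_handle_one(int item) { /* immediate processing */ }
static void demo_flush(struct demo_batch *b) { b->n = 0; /* drain */ }

void demo_queue(int item, int batching)
{
        /* get_cpu_var() disables preemption and yields this CPU's copy. */
        struct demo_batch *b = &get_cpu_var(demo_batch);

        if (!batching) {
                demo_handle_one(item);
                put_cpu_var(demo_batch);        /* early return still pairs up */
                return;
        }

        b->entries[b->n++] = item;
        if (b->n == ARRAY_SIZE(b->entries))
                demo_flush(b);
        put_cpu_var(demo_batch);                /* re-enable preemption */
}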
*/ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); unsigned long vsid, vaddr; unsigned int psize; int ssize; @@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, */ if (!batch->active) { flush_hash_page(vaddr, rpte, psize, ssize, 0); + put_cpu_var(ppc64_tlb_batch); return; } @@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, batch->index = ++i; if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); + put_cpu_var(ppc64_tlb_batch); } /* diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index fdb7384c0c4f..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c @@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); - for (i = 0; i < NR_CPUS; i++) { - if (lppaca_of(i).dyn_proc_status >= 2) + for (i = 0; i < NR_LPPACAS; i++) { + if (lppaca[i].dyn_proc_status >= 2) continue; snprintf(p, 32 - (p - buf), "@%d", i); @@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) dt_prop_str(dt, "device_type", device_type_cpu); - index = lppaca_of(i).dyn_hv_phys_proc_index; + index = lppaca[i].dyn_hv_phys_proc_index; d = &xIoHriProcessorVpd[index]; dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..2946ae10fbfd 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void) * on but calling this function multiple times is fine. 
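The s390 decompressor fix below computes the destination address up front with the usual round-up-to-alignment idiom, ((x + 4095UL) & -4096UL), instead of deriving it from the output pointer before that pointer has been set. The arithmetic generalises to any power-of-two alignment; a stand-alone version:

#include <stdio.h>

/*
 * Round x up to the next multiple of align (align must be a power of two).
 * With align = 4096 this is exactly the (x + 4095UL) & -4096UL form used
 * in the decompressor hunk below.
 */
static unsigned long align_up(unsigned long x, unsigned long align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        printf("%lu\n", align_up(8193UL, 4096UL));      /* 12288 */
        printf("%lu\n", align_up(8192UL, 4096UL));      /* 8192, already aligned */
        return 0;
}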
*/ identify_cpu(0, mfspr(SPRN_PVR)); + initialise_paca(&boot_paca, 0); powerpc_firmware_features |= FW_FEATURE_ISERIES; powerpc_firmware_features |= FW_FEATURE_LPAR; diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 0851eb1e919e..2751b3a8a66f 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -133,11 +133,12 @@ unsigned long decompress_kernel(void) unsigned long output_addr; unsigned char *output; - check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); + output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; + check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); memset(&_bss, 0, &_ebss - &_bss); free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); + output = (unsigned char *) output_addr; #ifdef CONFIG_BLK_DEV_INITRD /* diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index f42dbabc0d30..48884f89ab92 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) BUG_ON(ret != bsize); data += bsize - index; len -= bsize - index; + index = 0; } /* process as many blocks as possible */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 76daea117181..5c5ba10384c2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -36,14 +36,19 @@ static inline int atomic_read(const atomic_t *v) { - barrier(); - return v->counter; + int c; + + asm volatile( + " l %0,%1\n" + : "=d" (c) : "Q" (v->counter)); + return c; } static inline void atomic_set(atomic_t *v, int i) { - v->counter = i; - barrier(); + asm volatile( + " st %1,%0\n" + : "=Q" (v->counter) : "d" (i)); } static inline int atomic_add_return(int i, atomic_t *v) @@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) static inline long long atomic64_read(const atomic64_t *v) { - barrier(); - return v->counter; + long long c; + + asm volatile( + " lg %0,%1\n" + : "=d" (c) : "Q" (v->counter)); + return c; } static inline void atomic64_set(atomic64_t *v, long long i) { - v->counter = i; - barrier(); + asm volatile( + " stg %1,%0\n" + : "=Q" (v->counter) : "d" (i)); } static inline long long atomic64_add_return(long long i, atomic64_t *v) diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 24aafa68b643..2a30d5ac0667 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h @@ -13,6 +13,7 @@ #define L1_CACHE_BYTES 256 #define L1_CACHE_SHIFT 8 +#define NET_SKB_PAD 32 #define __read_mostly __attribute__((__section__(".data..read_mostly"))) diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index a78701da775b..4a5350037c8f 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -3,7 +3,7 @@ #include <asm-generic/sections.h> -extern void __nosave_begin, __nosave_end; +extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char _ebss[]; diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 672944f5b19c..e53b4b38bd11 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -14,7 +14,7 @@ #include <linux/io.h> #include <linux/sh_timer.h> #include 
<linux/serial_sci.h> -#include <asm/machtypes.h> +#include <generated/machtypes.h> static struct resource rtc_resources[] = { [0] = { @@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = { void __init plat_early_device_setup(void) { + struct platform_device *dev[1]; + if (mach_is_rts7751r2d()) { scif_platform_data.scscr |= SCSCR_CKE1; - early_platform_add_devices(&scif_device, 1); + dev[0] = &scif_device; + early_platform_add_devices(dev, 1); } else { - early_platform_add_devices(&sci_device, 1); - early_platform_add_devices(&scif_device, 1); + dev[0] = &sci_device; + early_platform_add_devices(dev, 1); + dev[0] = &scif_device; + early_platform_add_devices(dev, 1); } early_platform_add_devices(sh7750_early_devices, diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c index faa8f86c0db4..0901b2f14e15 100644 --- a/arch/sh/lib/delay.c +++ b/arch/sh/lib/delay.c @@ -10,6 +10,16 @@ void __delay(unsigned long loops) { __asm__ __volatile__( + /* + * ST40-300 appears to have an issue with this code, + * normally taking two cycles each loop, as with all + * other SH variants. If however the branch and the + * delay slot straddle an 8 byte boundary, this increases + * to 3 cycles. + * This align directive ensures this doesn't occur. + */ + ".balign 8\n\t" + "tst %0, %0\n\t" "1:\t" "bf/s 1b\n\t" diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 88d3dc3d30d5..5a580ea04429 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from, kunmap_atomic(vfrom, KM_USER0); } - if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) + if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || + (vma->vm_flags & VM_EXEC)) __flush_purge_region(vto, PAGE_SIZE); kunmap_atomic(vto, KM_USER1); diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index a2f5c61f924e..843e4faf6a50 100644 --- a/arch/sparc/include/asm/pcr.h +++ b/arch/sparc/include/asm/pcr.h @@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz) extern u64 pcr_enable; +extern int pcr_arch_init(void); + #endif /* __PCR_H */ diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c6..72509d0e34be 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, static int iommu_alloc_ctx(struct iommu *iommu) { int lowest = iommu->ctx_lowest_free; - int sz = IOMMU_NUM_CTXS - lowest; - int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); + int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); - if (unlikely(n == sz)) { + if (unlikely(n == IOMMU_NUM_CTXS)) { n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); if (unlikely(n == lowest)) { printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index ae96cf52a955..7c2ced612b8f 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -167,5 +167,3 @@ out_unregister: unregister_perf_hsvc(); return err; } - -early_initcall(pcr_arch_init); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f47040..555a76d1f4a1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -49,6 +49,7 @@ #include <asm/mdesc.h> #include <asm/ldc.h> #include <asm/hypervisor.h> +#include <asm/pcr.h> #include "cpumap.h" @@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu) void __init smp_cpus_done(unsigned int 
max_cpus) { + pcr_arch_init(); } void smp_send_reschedule(int cpu) diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7e..8f096e84a937 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S @@ -24,9 +24,9 @@ retl_efault: .globl __do_int_store __do_int_store: ld [%o2], %g1 - cmp %1, 2 + cmp %o1, 2 be 2f - cmp %1, 4 + cmp %o1, 4 be 1f srl %g1, 24, %g2 srl %g1, 16, %g7 diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 764b3eb7b604..48d00e72ce15 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -10,7 +10,7 @@ */ #include <linux/string.h> -#include <linux/bitops.h> +#include <linux/bitmap.h> #include <asm/bitext.h> @@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align) while (test_bit(offset + i, t->map) == 0) { i++; if (i == len) { - for (i = 0; i < len; i++) - __set_bit(offset + i, t->map); + bitmap_set(t->map, offset, len); if (offset == t->first_free) t->first_free = find_next_zero_bit (t->map, t->size, diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 646aa78ba5fd..46a823882437 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -62,7 +62,12 @@ int main(int argc, char *argv[]) if (fseek(f, -4L, SEEK_END)) { perror(argv[1]); } - fread(&olen, sizeof olen, 1, f); + + if (fread(&olen, sizeof(olen), 1, f) != 1) { + perror(argv[1]); + return 1; + } + ilen = ftell(f); olen = getle32(&olen); fclose(f); diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 211ca3f7fd16..4ea15ca89b2b 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -88,6 +88,7 @@ extern int acpi_disabled; extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; extern u8 acpi_sci_flags; extern int acpi_sci_override_gsi; diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h new file mode 100644 index 000000000000..e656ad8c0a2e --- /dev/null +++ b/arch/x86/include/asm/ce4100.h @@ -0,0 +1,6 @@ +#ifndef _ASM_CE4100_H_ +#define _ASM_CE4100_H_ + +int ce4100_pci_init(void); + +#endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4d0dfa0d998e..43a18c77676d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -36,6 +36,11 @@ #define MSR_IA32_PERFCTR1 0x000000c2 #define MSR_FSB_FREQ 0x000000cd +#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 +#define NHM_C3_AUTO_DEMOTE (1UL << 25) +#define NHM_C1_AUTO_DEMOTE (1UL << 26) +#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) + #define MSR_MTRRcap 0x000000fe #define MSR_IA32_BBL_CR_CTL 0x00000119 diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index e2f6a99f14ab..cc29086e30cd 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -22,6 +22,7 @@ #define ARCH_P4_CNTRVAL_BITS (40) #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) +#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) #define P4_ESCR_EVENT_MASK 0x7e000000U #define P4_ESCR_EVENT_SHIFT 25 diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 6c22bf353f26..725b77831993 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - 
*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; + *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index ce1d54c8a433..3e094af443c3 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -176,7 +176,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ + unsigned int base_dest_nodeid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index f25bdf238a33..c61934fbf22a 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -29,8 +29,10 @@ typedef struct xpaddr { /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) -#define FOREIGN_FRAME_BIT (1UL<<31) +#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1)) +#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2)) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) +#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT) /* Maximum amount of memory we can handle in a domain in pages */ #define MAX_DOMAIN_PAGES \ @@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order; extern unsigned long get_phys_to_machine(unsigned long pfn); extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); +extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); +extern unsigned long set_phys_range_identity(unsigned long pfn_s, + unsigned long pfn_e); extern int m2p_add_override(unsigned long mfn, struct page *page); extern int m2p_remove_override(struct page *page); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); +#ifdef CONFIG_XEN_DEBUG_FS +extern int p2m_dump_show(struct seq_file *m, void *v); +#endif static inline unsigned long pfn_to_mfn(unsigned long pfn) { unsigned long mfn; @@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) mfn = get_phys_to_machine(pfn); if (mfn != INVALID_P2M_ENTRY) - mfn &= ~FOREIGN_FRAME_BIT; + mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); return mfn; } @@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; + int ret = 0; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; + if (unlikely((mfn >> machine_to_phys_order) != 0)) { + pfn = ~0; + goto try_override; + } pfn = 0; /* * The array access can fail (e.g., device space beyond end of RAM). * In such cases it doesn't matter what we return (we return garbage), * but we must handle the fault without crashing! */ - __get_user(pfn, &machine_to_phys_mapping[mfn]); - - /* - * If this appears to be a foreign mfn (because the pfn - * doesn't map back to the mfn), then check the local override - * table to see if there's a better pfn to use. 
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); +try_override: + /* ret might be < 0 if there are no entries in the m2p for mfn */ + if (ret < 0) + pfn = ~0; + else if (get_phys_to_machine(pfn) != mfn) + /* + * If this appears to be a foreign mfn (because the pfn + * doesn't map back to the mfn), then check the local override + * table to see if there's a better pfn to use. + * + * m2p_find_override_pfn returns ~0 if it doesn't find anything. + */ + pfn = m2p_find_override_pfn(mfn, ~0); + + /* + * pfn is ~0 if there are no entries in the m2p for mfn or if the + * entry doesn't map back to the mfn and m2p_override doesn't have a + * valid entry for it. */ - if (get_phys_to_machine(pfn) != mfn) - pfn = m2p_find_override_pfn(mfn, pfn); + if (pfn == ~0 && + get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn)) + pfn = mfn; return pfn; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b3a71137983a..3e6e2d68f761 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; +int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; @@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, return 0; } - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { + if (acpi_skip_timer_override) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); + } } mp_override_legacy_irq(intsrc->source_irq, diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51ef31a89be9..51d4e1663066 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void) memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { - apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; + adev->evt.rating = APBT_CLOCKEVENT_RATING - 100; global_clock_event = &adev->evt; printk(KERN_DEBUG "%s clockevent registered as global\n", global_clock_event->name); diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 13a389179514..452932d34730 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void) addr += size; } - printk(KERN_INFO "Scanning %d areas for low memory corruption\n", - num_scan_areas); + if (num_scan_areas) + printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); } @@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy) { check_for_bios_corruption(); schedule_delayed_work(&bios_check_work, - round_jiffies_relative(corruption_check_period*HZ)); + round_jiffies_relative(corruption_check_period*HZ)); } static int start_periodic_check_for_corruption(void) { - if (!memory_corruption_check || corruption_check_period == 0) + if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) return 0; printk(KERN_INFO "Scanning for low 
memory corruption every %d seconds\n", diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index bd1cac747f67..52c93648e492 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) { if (c->x86 == 0x06) { if (cpu_has(c, X86_FEATURE_EST)) - printk(KERN_WARNING PFX "Warning: EST-capable CPU " - "detected. The acpi-cpufreq module offers " - "voltage scaling in addition of frequency " + printk_once(KERN_WARNING PFX "Warning: EST-capable " + "CPU detected. The acpi-cpufreq module offers " + "voltage scaling in addition to frequency " "scaling. You should use that instead of " "p4-clockmod, if possible.\n"); switch (c->x86_model) { diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4f6f679f2799..4a5a42b842ad 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c @@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) cmd_incomplete: iowrite16(0, &pcch_hdr->status); spin_unlock(&pcc_lock); - return -EINVAL; + return 0; } static int pcc_cpufreq_target(struct cpufreq_policy *policy, diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 35c7e65e59be..c567dec854f6 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = { static int __cpuinit powernowk8_init(void) { unsigned int i, supported_cpus = 0, cpu; + int rv; for_each_online_cpu(i) { int rc; @@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void) cpb_capable = true; - register_cpu_notifier(&cpb_nb); - msrs = msrs_alloc(); if (!msrs) { printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); return -ENOMEM; } + register_cpu_notifier(&cpb_nb); + rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); for_each_cpu(cpu, cpu_online_mask) { @@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void) (cpb_enabled ? "on" : "off")); } - return cpufreq_register_driver(&cpufreq_amd64_driver); + rv = cpufreq_register_driver(&cpufreq_amd64_driver); + if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { + unregister_cpu_notifier(&cpb_nb); + msrs_free(msrs); + msrs = NULL; + } + return rv; } /* driver entry point for term */ diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index f7a0993c1e7c..ff751a9f182b 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) return 1; } - /* it might be unflagged overflow */ - rdmsrl(hwc->event_base + hwc->idx, v); - if (!(v & ARCH_P4_CNTRVAL_MASK)) + /* + * In some circumstances the overflow might issue an NMI but did + * not set P4_CCCR_OVF bit. 
Because a counter holds a negative value + * we simply check for high bit being set, if it's cleared it means + * the counter has reached zero value and continued counting before + * real NMI signal was received: + */ + if (!(v & ARCH_P4_UNFLAGGED_BIT)) return 1; return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 76b8cd953dee..9efbdcc56425 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func) static u32 __init ati_sbx00_rev(int num, int slot, int func) { - u32 old, d; + u32 d; - d = read_pci_config(num, slot, func, 0x70); - old = d; - d &= ~(1<<8); - write_pci_config(num, slot, func, 0x70, d); d = read_pci_config(num, slot, func, 0x8); d &= 0xff; - write_pci_config(num, slot, func, 0x70, old); return d; } @@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func) { u32 d, rev; - if (acpi_use_timer_override) - return; - rev = ati_sbx00_rev(num, slot, func); + if (rev >= 0x40) + acpi_fix_pin2_polarity = 1; + if (rev > 0x13) return; + if (acpi_use_timer_override) + return; + /* check for IRQ0 interrupt swap */ d = read_pci_config(num, slot, func, 0x64); if (!(d & (1<<14))) diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index fc7aae1e2bc7..715037caeb43 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, + { /* Handle problems with rebooting on VersaLogic Menlow boards */ + .callback = set_bios_reboot, + .ident = "VersaLogic Menlow based board", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), + }, + }, { } }; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 54ce246a383e..63fec1531e89 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm) kvm_register_write(&svm->vcpu, reg, val); } + skip_emulated_instruction(&svm->vcpu); + return 1; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7d90ceb882a4..20e3f8702d1e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -229,15 +229,14 @@ void vmalloc_sync_all(void) for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE && address < FIXADDR_TOP; address += PMD_SIZE) { - - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { spinlock_t *pgt_lock; pmd_t *ret; + /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); @@ -247,7 +246,7 @@ void vmalloc_sync_all(void) if (!ret) break; } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } } @@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { if (fault & VM_FAULT_OOM) { + /* Kernel mode? 
Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + up_read(¤t->mm->mmap_sem); + no_context(regs, error_code, address); + return; + } + out_of_memory(regs, error_code, address); } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 71a59296af80..c14a5422e152 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); - unsigned long flags; struct page *page; if (pgd_none(*pgd_ref)) continue; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; pgd = (pgd_t *)page_address(page) + pgd_index(address); + /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); @@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_unlock(pgt_lock); } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } } diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 95ea1551eebc..1337c51b07d7 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -780,11 +780,7 @@ void __cpuinit numa_add_cpu(int cpu) int physnid; int nid = NUMA_NO_NODE; - apicid = early_per_cpu(x86_cpu_to_apicid, cpu); - if (apicid != BAD_APICID) - nid = apicid_to_node[apicid]; - if (nid == NUMA_NO_NODE) - nid = early_cpu_to_node(cpu); + nid = early_cpu_to_node(cpu); BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); /* diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index d343b3c81f3c..90825f2eb0f4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM]; void update_page_count(int level, unsigned long pages) { - unsigned long flags; - /* Protect against CPA */ - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); direct_pages_count[level] += pages; - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } static void split_page_count(int level) @@ -394,7 +392,7 @@ static int try_preserve_large_page(pte_t *kpte, unsigned long address, struct cpa_data *cpa) { - unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; + unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; pte_t new_pte, old_pte, *tmp; pgprot_t old_prot, new_prot, req_prot; int i, do_split = 1; @@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, if (cpa->force_split) return 1; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up already: @@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, } out_unlock: - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); return do_split; } static int split_large_page(pte_t *kpte, unsigned long address) { - unsigned long flags, pfn, pfninc = 1; + unsigned long pfn, pfninc = 1; unsigned int i, level; pte_t *pbase, *tmp; pgprot_t ref_prot; @@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!base) return -ENOMEM; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up for us already: @@ -591,7 +589,7 @@ out_unlock: */ if (base) __free_page(base); - spin_unlock_irqrestore(&pgd_lock, 
flags); + spin_unlock(&pgd_lock); return 0; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 500242d3c96d..0113d19c8aa6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) static void pgd_dtor(pgd_t *pgd) { - unsigned long flags; /* can be called from interrupt context */ - if (SHARED_KERNEL_PMD) return; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); pgd_list_del(pgd); - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } /* @@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; pmd_t *pmds[PREALLOCATED_PMDS]; - unsigned long flags; pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); @@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) * respect to anything walking the pgd_list, so that they * never see a partially populated pgd. */ - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); pgd_ctor(mm, pgd); pgd_prepopulate_pmd(mm, pgd, pmds); - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); return pgd; diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 85b68ef5e809..9260b3eb18d4 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c @@ -34,6 +34,7 @@ #include <linux/pci.h> #include <linux/init.h> +#include <asm/ce4100.h> #include <asm/pci_x86.h> struct sim_reg { @@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = { .write = ce4100_conf_write, }; -static int __init ce4100_pci_init(void) +int __init ce4100_pci_init(void) { init_sim_regs(); raw_pci_ops = &ce4100_pci_conf; - return 0; + /* Indicate caller that it should invoke pci_legacy_init() */ + return 1; } -subsys_initcall(ce4100_pci_init); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 57afd1da491d..8634e1b49c03 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -229,21 +229,27 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev) { int rc; int share = 1; + u8 gsi; - dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); - - if (dev->irq < 0) - return -EINVAL; + rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", + rc); + return rc; + } - if (dev->irq < NR_IRQS_LEGACY) + if (gsi < NR_IRQS_LEGACY) share = 0; - rc = xen_allocate_pirq(dev->irq, share, "pcifront"); + rc = xen_allocate_pirq(gsi, share, "pcifront"); if (rc < 0) { - dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", - dev->irq, rc); + dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n", + gsi, rc); return rc; } + + dev->irq = rc; + dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); return 0; } diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index d2c0d51a7178..cd6f184c3b3f 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -15,6 +15,7 @@ #include <linux/serial_reg.h> #include <linux/serial_8250.h> +#include <asm/ce4100.h> #include <asm/setup.h> #include <asm/io.h> @@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void) x86_init.resources.probe_roms = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; x86_init.mpparse.find_smp_config = sdv_find_smp_config; + x86_init.pci.init = ce4100_pci_init; } diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index dab874647530..044bda5b3174 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -140,8 +140,7 
@@ void * __init prom_early_alloc(unsigned long size) * wasted bootmem) and hand off chunks of it to callers. */ res = alloc_bootmem(chunk_size); - if (!res) - return NULL; + BUG_ON(!res); prom_early_allocated += chunk_size; memset(res, 0, chunk_size); free_mem = chunk_size; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index df58e9cad96a..a7b38d35c29a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub + * base_dest_nodeid is the nasid of the first uvhub * in the partition. The bit map will indicate uvhub numbers, * which are 0-N in a partition. Pnodes are unique system-wide. */ - bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; + bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); bd2->header.dest_subnodeid = 0x10; /* the LB */ bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 5b54892e4bc3..e4343fe488ed 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -48,3 +48,11 @@ config XEN_DEBUG_FS help Enable statistics output and various tuning options in debugfs. Enabling this option may incur a significant performance overhead. + +config XEN_DEBUG + bool "Enable Xen debug checks" + depends on XEN + default n + help + Enable various WARN_ON checks in the Xen MMU code. + Enabling this option WILL incur a significant performance overhead. diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61ad574..832765c0fb8c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -46,6 +46,7 @@ #include <linux/module.h> #include <linux/gfp.h> #include <linux/memblock.h> +#include <linux/seq_file.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> @@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) if (val & _PAGE_PRESENT) { unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; pteval_t flags = val & PTE_FLAGS_MASK; - unsigned long mfn = pfn_to_mfn(pfn); + unsigned long mfn; + if (!xen_feature(XENFEAT_auto_translated_physmap)) + mfn = get_phys_to_machine(pfn); + else + mfn = pfn; /* * If there's no mfn for the pfn, then just create an * empty non-present pte. Unfortunately this loses @@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) if (unlikely(mfn == INVALID_P2M_ENTRY)) { mfn = 0; flags = 0; + } else { + /* + * Paramount to do this test _after_ the + * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY & + * IDENTITY_FRAME_BIT resolves to true. 
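A note on that ordering constraint: INVALID_P2M_ENTRY is ~0UL, so every bit is set and testing IDENTITY_FRAME_BIT before the invalid check would misread an invalid entry as identity-mapped. A minimal userspace sketch, reusing the bit definitions from the asm/xen/page.h hunk above (BITS_PER_LONG of 64 and the example pfn 0xc0000 are assumptions, not part of the patch):

#include <stdio.h>

#define BITS_PER_LONG      64                        /* assumption: x86-64 build */
#define INVALID_P2M_ENTRY  (~0UL)
#define FOREIGN_FRAME_BIT  (1UL << (BITS_PER_LONG - 1))
#define IDENTITY_FRAME_BIT (1UL << (BITS_PER_LONG - 2))
#define IDENTITY_FRAME(m)  ((m) | IDENTITY_FRAME_BIT)

int main(void)
{
	unsigned long invalid = INVALID_P2M_ENTRY;
	unsigned long ident   = IDENTITY_FRAME(0xc0000UL);  /* a 1-1 mapped pfn */

	/* ~0UL has every bit set, so the identity bit alone cannot
	 * distinguish "invalid" from "identity-mapped": */
	printf("%d\n", (invalid & IDENTITY_FRAME_BIT) != 0);    /* prints 1 */
	printf("%d\n", (ident   & IDENTITY_FRAME_BIT) != 0);    /* prints 1 */

	/* Comparing against INVALID_P2M_ENTRY first removes the
	 * ambiguity, which is the ordering pte_pfn_to_mfn() insists on: */
	printf("%d\n", invalid == INVALID_P2M_ENTRY);           /* prints 1 */
	printf("%d\n", ident   == INVALID_P2M_ENTRY);           /* prints 0 */
	return 0;
}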
+ */ + mfn &= ~FOREIGN_FRAME_BIT; + if (mfn & IDENTITY_FRAME_BIT) { + mfn &= ~IDENTITY_FRAME_BIT; + flags |= _PAGE_IOMAP; + } } - val = ((pteval_t)mfn << PAGE_SHIFT) | flags; } @@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); +#ifdef CONFIG_XEN_DEBUG +pte_t xen_make_pte_debug(pteval_t pte) +{ + phys_addr_t addr = (pte & PTE_PFN_MASK); + phys_addr_t other_addr; + bool io_page = false; + pte_t _pte; + + if (pte & _PAGE_IOMAP) + io_page = true; + + _pte = xen_make_pte(pte); + + if (!addr) + return _pte; + + if (io_page && + (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { + other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; + WARN(addr != other_addr, + "0x%lx is using VM_IO, but it is 0x%lx!\n", + (unsigned long)addr, (unsigned long)other_addr); + } else { + pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; + other_addr = (_pte.pte & PTE_PFN_MASK); + WARN((addr == other_addr) && (!io_page) && (!iomap_set), + "0x%lx is missing VM_IO (and wasn't fixed)!\n", + (unsigned long)addr); + } + + return _pte; +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); +#endif + pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); @@ -986,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm) */ void xen_mm_pin_all(void) { - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (!PagePinned(page)) { @@ -998,7 +1047,7 @@ void xen_mm_pin_all(void) } } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } /* @@ -1099,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) */ void xen_mm_unpin_all(void) { - unsigned long flags; struct page *page; - spin_lock_irqsave(&pgd_lock, flags); + spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { if (PageSavePinned(page)) { @@ -1112,7 +1160,7 @@ void xen_mm_unpin_all(void) } } - spin_unlock_irqrestore(&pgd_lock, flags); + spin_unlock(&pgd_lock); } void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) @@ -1942,6 +1990,9 @@ __init void xen_ident_map_ISA(void) static __init void xen_post_allocator_init(void) { +#ifdef CONFIG_XEN_DEBUG + pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); +#endif pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; @@ -2074,7 +2125,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, in_frames[i] = virt_to_mfn(vaddr); MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); - set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); + __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); if (out_frames) out_frames[i] = virt_to_pfn(vaddr); @@ -2353,6 +2404,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); #ifdef CONFIG_XEN_DEBUG_FS +static int p2m_dump_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, p2m_dump_show, NULL); +} + +static const struct file_operations p2m_dump_fops = { + .open = p2m_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static struct dentry *d_mmu_debug; static int __init xen_mmu_debugfs(void) @@ -2408,6 +2471,7 @@ static int __init xen_mmu_debugfs(void) debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, &mmu_stats.prot_commit_batched); + debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); return 0; } fs_initcall(xen_mmu_debugfs); diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index fd12d7ce7ff9..00fe5604c593 
100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -23,6 +23,129 @@ * P2M_PER_PAGE depends on the architecture, as a mfn is always * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to * 512 and 1024 entries respectively. + * + * In short, these structures contain the Machine Frame Number (MFN) of the PFN. + * + * However not all entries are filled with MFNs. Specifically for all other + * leaf entries, or for the top root, or middle one, for which there is a void + * entry, we assume it is "missing". So (for example) + * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. + * + * We also have the possibility of setting 1-1 mappings on certain regions, so + * that: + * pfn_to_mfn(0xc0000)=0xc0000 + * + * The benefit of this is, that we can assume for non-RAM regions (think + * PCI BARs, or ACPI spaces), we can create mappings easily b/c we + * get the PFN value to match the MFN. + * + * For this to work efficiently we have one new page p2m_identity and + * allocate (via reserved_brk) any other pages we need to cover the sides + * (1GB or 4MB boundary violations). All entries in p2m_identity are set to + * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, + * no other fancy value). + * + * On lookup we spot that the entry points to p2m_identity and return the + * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. + * If the entry points to an allocated page, we just proceed as before and + * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in + * appropriate functions (pfn_to_mfn). + * + * The reason for having the IDENTITY_FRAME_BIT instead of just returning the + * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a + * non-identity pfn. To protect ourselves against we elect to set (and get) the + * IDENTITY_FRAME_BIT on all identity mapped PFNs. + * + * This simplistic diagram is used to explain the more subtle piece of code. + * There is also a digram of the P2M at the end that can help. + * Imagine your E820 looking as so: + * + * 1GB 2GB + * /-------------------+---------\/----\ /----------\ /---+-----\ + * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | + * \-------------------+---------/\----/ \----------/ \---+-----/ + * ^- 1029MB ^- 2001MB + * + * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), + * 2048MB = 524288 (0x80000)] + * + * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB + * is actually not present (would have to kick the balloon driver to put it in). + * + * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: + * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start + * of the PFN and the end PFN (263424 and 512256 respectively). The first step + * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page + * covers 512^2 of page estate (1GB) and in case the start or end PFN is not + * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn + * to end pfn. We reserve_brk top leaf pages if they are missing (means they + * point to p2m_mid_missing). + * + * With the E820 example above, 263424 is not 1GB aligned so we allocate a + * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. + * Each entry in the allocate page is "missing" (points to p2m_missing). + * + * Next stage is to determine if we need to do a more granular boundary check + * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. 
+ * We check if the start pfn and end pfn violate that boundary check, and if + * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer + * granularity of setting which PFNs are missing and which ones are identity. + * In our example 263424 and 512256 both fail the check so we reserve_brk two + * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" + * values) and assign them to p2m[1][2] and p2m[1][488] respectively. + * + * At this point we would at minimum reserve_brk one page, but could be up to + * three. Each call to set_phys_range_identity has at maximum a three page + * cost. If we were to query the P2M at this stage, all those entries from + * start PFN through end PFN (so 1029MB -> 2001MB) would return + * INVALID_P2M_ENTRY ("missing"). + * + * The next step is to walk from the start pfn to the end pfn setting + * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. + * If we find that the middle leaf is pointing to p2m_missing we can swap it + * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this + * point we do not need to worry about boundary aligment (so no need to + * reserve_brk a middle page, figure out which PFNs are "missing" and which + * ones are identity), as that has been done earlier. If we find that the + * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference + * that page (which covers 512 PFNs) and set the appropriate PFN with + * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we + * set from p2m[1][2][256->511] and p2m[1][488][0->256] with + * IDENTITY_FRAME_BIT set. + * + * All other regions that are void (or not filled) either point to p2m_missing + * (considered missing) or have the default value of INVALID_P2M_ENTRY (also + * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] + * contain the INVALID_P2M_ENTRY value and are considered "missing." + * + * This is what the p2m ends up looking (for the E820 above) with this + * fabulous drawing: + * + * p2m /--------------\ + * /-----\ | &mfn_list[0],| /-----------------\ + * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | + * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | + * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | + * |-----| \ | [p2m_identity]+\\ | .... | + * | 2 |--\ \-------------------->| ... | \\ \----------------/ + * |-----| \ \---------------/ \\ + * | 3 |\ \ \\ p2m_identity + * |-----| \ \-------------------->/---------------\ /-----------------\ + * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | + * \-----/ / | [p2m_identity]+-->| ..., ~0 | + * / /---------------\ | .... | \-----------------/ + * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | + * / | IDENTITY[@256]|<----/ \---------------/ + * / | ~0, ~0, .... | + * | \---------------/ + * | + * p2m_missing p2m_missing + * /------------------\ /------------\ + * | [p2m_mid_missing]+---->| ~0, ~0, ~0 | + * | [p2m_mid_missing]+---->| ..., ~0 | + * \------------------/ \------------/ + * + * where ~0 is INVALID_P2M_ENTRY. 
IDENTITY is (PFN | IDENTITY_BIT) */ #include <linux/init.h> @@ -30,6 +153,7 @@ #include <linux/list.h> #include <linux/hash.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <asm/cache.h> #include <asm/setup.h> @@ -59,9 +183,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); + RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); +/* We might hit two boundary violations at the start and end, at max each + * boundary violation will require three middle nodes. */ +RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -221,6 +351,9 @@ void __init xen_build_dynamic_phys_to_machine(void) p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_init(p2m_top); + p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_init(p2m_identity); + /* * The domain builder gives us a pre-constructed p2m array in * mfn_list for all the pages initially given to us, so we just @@ -266,6 +399,14 @@ unsigned long get_phys_to_machine(unsigned long pfn) mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); + /* + * The INVALID_P2M_ENTRY is filled in both p2m_*identity + * and in p2m_*missing, so returning the INVALID_P2M_ENTRY + * would be wrong. + */ + if (p2m_top[topidx][mididx] == p2m_identity) + return IDENTITY_FRAME(pfn); + return p2m_top[topidx][mididx][idx]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); @@ -335,9 +476,11 @@ static bool alloc_p2m(unsigned long pfn) p2m_top_mfn_p[topidx] = mid_mfn; } - if (p2m_top[topidx][mididx] == p2m_missing) { + if (p2m_top[topidx][mididx] == p2m_identity || + p2m_top[topidx][mididx] == p2m_missing) { /* p2m leaf page is missing */ unsigned long *p2m; + unsigned long *p2m_orig = p2m_top[topidx][mididx]; p2m = alloc_p2m_page(); if (!p2m) @@ -345,7 +488,7 @@ static bool alloc_p2m(unsigned long pfn) p2m_init(p2m); - if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) + if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) free_p2m_page(p2m); else mid_mfn[mididx] = virt_to_mfn(p2m); @@ -354,11 +497,91 @@ static bool alloc_p2m(unsigned long pfn) return true; } +bool __early_alloc_p2m(unsigned long pfn) +{ + unsigned topidx, mididx, idx; + + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + idx = p2m_index(pfn); + + /* Pfff.. No boundary cross-over, lets get out. */ + if (!idx) + return false; + + WARN(p2m_top[topidx][mididx] == p2m_identity, + "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", + topidx, mididx); + + /* + * Could be done by xen_build_dynamic_phys_to_machine.. 
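The p2m[1][2][256->511] and p2m[1][488][0->256] figures in the large comment at the top of arch/x86/xen/p2m.c fall straight out of the three-level index arithmetic. A standalone sketch follows; the per-level formulas are assumed from the 512-entries-per-level layout the comment describes, since the bodies of p2m_top_index/p2m_mid_index/p2m_index are not shown in this hunk:

#include <stdio.h>

/* Assumed 64-bit layout: 512 entries at every level of the p2m tree. */
#define P2M_PER_PAGE      512UL
#define P2M_MID_PER_PAGE  512UL

static unsigned long p2m_top_index(unsigned long pfn)
{
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);   /* one entry per 1GB */
}

static unsigned long p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;   /* one entry per 2MB */
}

static unsigned long p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}

int main(void)
{
	/* The PFNs from the E820 example above: 1029MB and 2001MB. */
	unsigned long pfns[] = { 0x40500UL, 0x7D100UL };

	for (int i = 0; i < 2; i++)
		printf("pfn 0x%lx -> p2m[%lu][%lu][%lu]\n", pfns[i],
		       p2m_top_index(pfns[i]),
		       p2m_mid_index(pfns[i]),
		       p2m_index(pfns[i]));
	/* Prints p2m[1][2][256] and p2m[1][488][256], consistent with the
	 * p2m[1][2][256->511] and p2m[1][488][0->256] ranges in the text. */
	return 0;
}

Both example PFNs land on entry 256 of their leaf page rather than entry 0, which is exactly the boundary violation that makes set_phys_range_identity reserve_brk a middle leaf page at each end.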
+ */ + if (p2m_top[topidx][mididx] != p2m_missing) + return false; + + /* Boundary cross-over for the edges: */ + if (idx) { + unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); + + p2m_init(p2m); + + p2m_top[topidx][mididx] = p2m; + + } + return idx != 0; +} +unsigned long set_phys_range_identity(unsigned long pfn_s, + unsigned long pfn_e) +{ + unsigned long pfn; + + if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) + return 0; + + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) + return pfn_e - pfn_s; + + if (pfn_s > pfn_e) + return 0; + + for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); + pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); + pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) + { + unsigned topidx = p2m_top_index(pfn); + if (p2m_top[topidx] == p2m_mid_missing) { + unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); + + p2m_mid_init(mid); + + p2m_top[topidx] = mid; + } + } + + __early_alloc_p2m(pfn_s); + __early_alloc_p2m(pfn_e); + + for (pfn = pfn_s; pfn < pfn_e; pfn++) + if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) + break; + + if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), + "Identity mapping failed. We are %ld short of 1-1 mappings!\n", + (pfn_e - pfn_s) - (pfn - pfn_s))) + printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn); + + return pfn - pfn_s; +} + /* Try to install p2m mapping; fail if intermediate bits missing */ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { unsigned topidx, mididx, idx; + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { + BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + return true; + } if (unlikely(pfn >= MAX_P2M_PFN)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; @@ -368,6 +591,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); + /* For sparse holes were the p2m leaf has real PFN along with + * PCI holes, stick in the PFN as the MFN value. + */ + if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { + if (p2m_top[topidx][mididx] == p2m_identity) + return true; + + /* Swap over from MISSING to IDENTITY if needed. 
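The MISSING to IDENTITY switch relies on a compare-and-swap so that concurrent updaters install the shared identity leaf at most once. A small userspace model of that publication step, using the GCC __sync builtin in place of the kernel cmpxchg used just below (the array sizes and the mark_identity() helper are illustrative only):

#include <stdio.h>

#define P2M_PER_PAGE 512

/* Shared sentinel leaves, as in the Xen p2m code. */
static unsigned long p2m_missing[P2M_PER_PAGE];
static unsigned long p2m_identity[P2M_PER_PAGE];

/* One middle-level slot that initially points at the "missing" leaf. */
static unsigned long *leaf = p2m_missing;

/* Swap MISSING -> IDENTITY exactly once, modeled on
 * cmpxchg(&p2m_top[topidx][mididx], p2m_missing, p2m_identity). */
static int mark_identity(void)
{
	return __sync_bool_compare_and_swap(&leaf, p2m_missing, p2m_identity);
}

int main(void)
{
	printf("first  swap: %d\n", mark_identity());    /* 1: we installed it */
	printf("second swap: %d\n", mark_identity());    /* 0: already identity */
	printf("leaf is identity: %d\n", leaf == p2m_identity);
	return 0;
}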
*/ + if (p2m_top[topidx][mididx] == p2m_missing) { + WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, + p2m_identity) != p2m_missing); + return true; + } + } + if (p2m_top[topidx][mididx] == p2m_missing) return mfn == INVALID_P2M_ENTRY; @@ -378,11 +616,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return true; - } - if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!alloc_p2m(pfn)) return false; @@ -520,3 +753,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) return ret; } EXPORT_SYMBOL_GPL(m2p_find_override_pfn); + +#ifdef CONFIG_XEN_DEBUG_FS + +int p2m_dump_show(struct seq_file *m, void *v) +{ + static const char * const level_name[] = { "top", "middle", + "entry", "abnormal" }; + static const char * const type_name[] = { "identity", "missing", + "pfn", "abnormal"}; +#define TYPE_IDENTITY 0 +#define TYPE_MISSING 1 +#define TYPE_PFN 2 +#define TYPE_UNKNOWN 3 + unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; + unsigned int uninitialized_var(prev_level); + unsigned int uninitialized_var(prev_type); + + if (!p2m_top) + return 0; + + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { + unsigned topidx = p2m_top_index(pfn); + unsigned mididx = p2m_mid_index(pfn); + unsigned idx = p2m_index(pfn); + unsigned lvl, type; + + lvl = 4; + type = TYPE_UNKNOWN; + if (p2m_top[topidx] == p2m_mid_missing) { + lvl = 0; type = TYPE_MISSING; + } else if (p2m_top[topidx] == NULL) { + lvl = 0; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx] == NULL) { + lvl = 1; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx] == p2m_identity) { + lvl = 1; type = TYPE_IDENTITY; + } else if (p2m_top[topidx][mididx] == p2m_missing) { + lvl = 1; type = TYPE_MISSING; + } else if (p2m_top[topidx][mididx][idx] == 0) { + lvl = 2; type = TYPE_UNKNOWN; + } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { + lvl = 2; type = TYPE_IDENTITY; + } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { + lvl = 2; type = TYPE_MISSING; + } else if (p2m_top[topidx][mididx][idx] == pfn) { + lvl = 2; type = TYPE_PFN; + } else if (p2m_top[topidx][mididx][idx] != pfn) { + lvl = 2; type = TYPE_PFN; + } + if (pfn == 0) { + prev_level = lvl; + prev_type = type; + } + if (pfn == MAX_DOMAIN_PAGES-1) { + lvl = 3; + type = TYPE_UNKNOWN; + } + if (prev_type != type) { + seq_printf(m, " [0x%lx->0x%lx] %s\n", + prev_pfn_type, pfn, type_name[prev_type]); + prev_pfn_type = pfn; + prev_type = type; + } + if (prev_level != lvl) { + seq_printf(m, " [0x%lx->0x%lx] level %s\n", + prev_pfn_level, pfn, level_name[prev_level]); + prev_pfn_level = pfn; + prev_level = lvl; + } + } + return 0; +#undef TYPE_IDENTITY +#undef TYPE_MISSING +#undef TYPE_PFN +#undef TYPE_UNKNOWN +} +#endif diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a8a66a50d446..fa0269a99377 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; static __init void xen_add_extra_mem(unsigned long pages) { + unsigned long pfn; + u64 size = (u64)pages * PAGE_SIZE; u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; @@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages) xen_extra_mem_size += size; xen_max_p2m_pfn = PFN_DOWN(extra_start + size); + + for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; 
pfn++) + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } static unsigned long __init xen_release_chunk(phys_addr_t start_addr, @@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", start, end, ret); if (ret == 1) { - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); len++; } } @@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, return released; } +static unsigned long __init xen_set_identity(const struct e820entry *list, + ssize_t map_size) +{ + phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; + phys_addr_t start_pci = last; + const struct e820entry *entry; + unsigned long identity = 0; + int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + phys_addr_t start = entry->addr; + phys_addr_t end = start + entry->size; + + if (start < last) + start = last; + + if (end <= start) + continue; + + /* Skip over the 1MB region. */ + if (last > end) + continue; + + if (entry->type == E820_RAM) { + if (start > start_pci) + identity += set_phys_range_identity( + PFN_UP(start_pci), PFN_DOWN(start)); + + /* Without saving 'last' we would gooble RAM too + * at the end of the loop. */ + last = end; + start_pci = end; + continue; + } + start_pci = min(start, start_pci); + last = end; + } + if (last > start_pci) + identity += set_phys_range_identity( + PFN_UP(start_pci), PFN_DOWN(last)); + return identity; +} /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ char * __init xen_memory_setup(void) { static struct e820entry map[E820MAX] __initdata; + static struct e820entry map_raw[E820MAX] __initdata; unsigned long max_pfn = xen_start_info->nr_pages; unsigned long long mem_end; @@ -151,6 +199,7 @@ char * __init xen_memory_setup(void) struct xen_memory_map memmap; unsigned long extra_pages = 0; unsigned long extra_limit; + unsigned long identity_pages = 0; int i; int op; @@ -176,6 +225,7 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; xen_extra_mem_start = mem_end; for (i = 0; i < memmap.nr_entries; i++) { @@ -194,6 +244,15 @@ char * __init xen_memory_setup(void) end -= delta; extra_pages += PFN_DOWN(delta); + /* + * Set RAM below 4GB that is not for us to be unusable. + * This prevents "System RAM" address space from being + * used as potential resource for I/O address (happens + * when 'allocate_resource' is called). + */ + if (delta && + (xen_initial_domain() && end < 0x100000000ULL)) + e820_add_region(end, delta, E820_UNUSABLE); } if (map[i].size > 0 && end > xen_extra_mem_start) @@ -251,6 +310,13 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(extra_pages); + /* + * Set P2M for all non-RAM pages and E820 gaps to be identity + * type PFNs. We supply it with the non-sanitized version + * of the E820. 
+ */ + identity_pages = xen_set_identity(map_raw, memmap.nr_entries); + printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); return "Xen"; } diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 067759e3d6a5..2e2d370a47b1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -397,7 +397,9 @@ void xen_setup_timer(int cpu) name = "<timer kasprintf failed>"; irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, + IRQF_DISABLED|IRQF_PERCPU| + IRQF_NOBALANCING|IRQF_TIMER| + IRQF_FORCE_RESUME, name, NULL); evt = &per_cpu(xen_clock_events, cpu); diff --git a/block/blk-core.c b/block/blk-core.c index 2f4002f79a24..518dd423a5fe 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q) WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q); + __blk_run_queue(q, false); } EXPORT_SYMBOL(blk_start_queue); @@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue); /** * __blk_run_queue - run a single device queue * @q: The queue to run + * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. * * Description: * See @blk_run_queue. This variant must be called with the queue lock * held and interrupts disabled. * */ -void __blk_run_queue(struct request_queue *q) +void __blk_run_queue(struct request_queue *q, bool force_kblockd) { blk_remove_plug(q); @@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q) * Only recurse once to avoid overrunning the stack, let the unplug * handling reinvoke the handler shortly if we already got there. */ - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { + if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { q->request_fn(q); queue_flag_clear(QUEUE_FLAG_REENTER, q); } else { @@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q) unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - __blk_run_queue(q); + __blk_run_queue(q, false); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_run_queue); @@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, drive_stat_acct(rq, 1); __elv_add_request(q, rq, where, 0); - __blk_run_queue(q); + __blk_run_queue(q, false); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_insert_request); @@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *dwork, unsigned long delay) -{ - return queue_delayed_work(kblockd_workqueue, dwork, delay); -} -EXPORT_SYMBOL(kblockd_schedule_delayed_work); - int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * diff --git a/block/blk-flush.c b/block/blk-flush.c index 54b123d6563e..b27d0208611b 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q, /* * Moving a request silently to empty queue_head may stall the - * queue. Kick the queue in those cases. + * queue. Kick the queue in those cases. This function is called + * from request completion path and calling directly into + * request_fn may confuse the driver. Always use kblockd. 
*/ if (was_empty && next_rq) - __blk_run_queue(q); + __blk_run_queue(q, true); } static void pre_flush_end_io(struct request *rq, int error) @@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q) BUG(); } - elv_insert(q, rq, ELEVATOR_INSERT_FRONT); + elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); return rq; } diff --git a/block/blk-lib.c b/block/blk-lib.c index 1a320d2406b0..bd3e8df4d5e2 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -109,7 +109,6 @@ struct bio_batch atomic_t done; unsigned long flags; struct completion *wait; - bio_end_io_t *end_io; }; static void bio_batch_end_io(struct bio *bio, int err) @@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err) else clear_bit(BIO_UPTODATE, &bb->flags); } - if (bb) { - if (bb->end_io) - bb->end_io(bio, err); - atomic_inc(&bb->done); - complete(bb->wait); - } + if (bb) + if (atomic_dec_and_test(&bb->done)) + complete(bb->wait); bio_put(bio); } /** - * blkdev_issue_zeroout generate number of zero filed write bios + * blkdev_issue_zeroout - generate number of zero filed write bios * @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write @@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, int ret; struct bio *bio; struct bio_batch bb; - unsigned int sz, issued = 0; + unsigned int sz; DECLARE_COMPLETION_ONSTACK(wait); - atomic_set(&bb.done, 0); + atomic_set(&bb.done, 1); bb.flags = 1 << BIO_UPTODATE; bb.wait = &wait; - bb.end_io = NULL; submit: ret = 0; @@ -185,12 +180,12 @@ submit: break; } ret = 0; - issued++; + atomic_inc(&bb.done); submit_bio(WRITE, bio); } /* Wait for bios in-flight */ - while (issued != atomic_read(&bb.done)) + if (!atomic_dec_and_test(&bb.done)) wait_for_completion(&wait); if (!test_bit(BIO_UPTODATE, &bb.flags)) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a89043a3caa4..e36cc10a346c 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -20,6 +20,11 @@ static int throtl_quantum = 32; /* Throttling is performed over 100ms slice and after that slice is renewed */ static unsigned long throtl_slice = HZ/10; /* 100 ms */ +/* A workqueue to queue throttle related work */ +static struct workqueue_struct *kthrotld_workqueue; +static void throtl_schedule_delayed_work(struct throtl_data *td, + unsigned long delay); + struct throtl_rb_root { struct rb_root rb; struct rb_node *left; @@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td) update_min_dispatch_time(st); if (time_before_eq(st->min_disptime, jiffies)) - throtl_schedule_delayed_work(td->queue, 0); + throtl_schedule_delayed_work(td, 0); else - throtl_schedule_delayed_work(td->queue, - (st->min_disptime - jiffies)); + throtl_schedule_delayed_work(td, (st->min_disptime - jiffies)); } static inline void @@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work) } /* Call with queue lock held */ -void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) +static void +throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) { - struct throtl_data *td = q->td; struct delayed_work *dwork = &td->throtl_work; if (total_nr_queued(td) > 0) { @@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) * Cancel that and schedule a new one. */ __cancel_delayed_work(dwork); - kblockd_schedule_delayed_work(q, dwork, delay); + queue_delayed_work(kthrotld_workqueue, dwork, delay); throtl_log(td, "schedule work. 
delay=%lu jiffies=%lu", delay, jiffies); } } -EXPORT_SYMBOL(throtl_schedule_delayed_work); static void throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) @@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key, smp_mb__after_atomic_inc(); /* Schedule a work now to process the limit change */ - throtl_schedule_delayed_work(td->queue, 0); + throtl_schedule_delayed_work(td, 0); } static void throtl_update_blkio_group_write_bps(void *key, @@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key, smp_mb__before_atomic_inc(); atomic_inc(&td->limits_changed); smp_mb__after_atomic_inc(); - throtl_schedule_delayed_work(td->queue, 0); + throtl_schedule_delayed_work(td, 0); } static void throtl_update_blkio_group_read_iops(void *key, @@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key, smp_mb__before_atomic_inc(); atomic_inc(&td->limits_changed); smp_mb__after_atomic_inc(); - throtl_schedule_delayed_work(td->queue, 0); + throtl_schedule_delayed_work(td, 0); } static void throtl_update_blkio_group_write_iops(void *key, @@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key, smp_mb__before_atomic_inc(); atomic_inc(&td->limits_changed); smp_mb__after_atomic_inc(); - throtl_schedule_delayed_work(td->queue, 0); + throtl_schedule_delayed_work(td, 0); } void throtl_shutdown_timer_wq(struct request_queue *q) @@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q) static int __init throtl_init(void) { + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); + if (!kthrotld_workqueue) + panic("Failed to create kthrotld\n"); + blkio_policy_register(&blkio_policy_throtl); return 0; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 7be4c7959625..ea83a4f0c27d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqd->busy_queues > 1) { cfq_del_timer(cfqd, cfqq); cfq_clear_cfqq_wait_request(cfqq); - __blk_run_queue(cfqd->queue); + __blk_run_queue(cfqd->queue, false); } else { cfq_blkiocg_update_idle_time_stats( &cfqq->cfqg->blkg); @@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); - __blk_run_queue(cfqd->queue); + __blk_run_queue(cfqd->queue, false); } } @@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work) struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); - __blk_run_queue(cfqd->queue); + __blk_run_queue(cfqd->queue, false); spin_unlock_irq(q->queue_lock); } diff --git a/block/elevator.c b/block/elevator.c index 2569512830d3..236e93c1f46c 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q) */ elv_drain_elevator(q); while (q->rq.elvpriv) { - __blk_run_queue(q); + __blk_run_queue(q, false); spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); @@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) * with anything. There's no point in delaying queue * processing. 
*/ - __blk_run_queue(q); + __blk_run_queue(q, false); break; case ELEVATOR_INSERT_SORT: diff --git a/block/genhd.c b/block/genhd.c index 6a5b772aa201..cbf1112a885c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno) struct block_device *bdev = bdget_disk(disk, partno); if (bdev) { fsync_bdev(bdev); - res = __invalidate_device(bdev); + res = __invalidate_device(bdev, true); bdput(bdev); } return res; diff --git a/block/ioctl.c b/block/ioctl.c index 9049d460fa89..1124cd297263 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -EINVAL; if (get_user(n, (int __user *) arg)) return -EFAULT; - if (!(mode & FMODE_EXCL) && - blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) - return -EBUSY; + if (!(mode & FMODE_EXCL)) { + bdgrab(bdev); + if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) + return -EBUSY; + } ret = set_blocksize(bdev, n); if (!(mode & FMODE_EXCL)) blkdev_put(bdev, mode | FMODE_EXCL); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 54784bb42cec..edc25867ad9d 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -416,10 +416,15 @@ struct acpi_gpe_handler_info { u8 originally_enabled; /* True if GPE was originally enabled */ }; +struct acpi_gpe_notify_object { + struct acpi_namespace_node *node; + struct acpi_gpe_notify_object *next; +}; + union acpi_gpe_dispatch_info { struct acpi_namespace_node *method_node; /* Method node for this GPE level */ struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ - struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ + struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */ }; /* diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 14988a86066f..f4725212eb48 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) acpi_status status; struct acpi_gpe_event_info *local_gpe_event_info; struct acpi_evaluate_info *info; + struct acpi_gpe_notify_object *notify_object; ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); @@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) * from this thread -- because handlers may in turn run other * control methods. */ - status = - acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 
- device_node, - ACPI_NOTIFY_DEVICE_WAKE); + status = acpi_ev_queue_notify_request( + local_gpe_event_info->dispatch.device.node, + ACPI_NOTIFY_DEVICE_WAKE); + + notify_object = local_gpe_event_info->dispatch.device.next; + while (ACPI_SUCCESS(status) && notify_object) { + status = acpi_ev_queue_notify_request( + notify_object->node, + ACPI_NOTIFY_DEVICE_WAKE); + notify_object = notify_object->next; + } + break; case ACPI_GPE_DISPATCH_METHOD: diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 3b20a3401b64..52aaff3df562 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, acpi_status status = AE_BAD_PARAMETER; struct acpi_gpe_event_info *gpe_event_info; struct acpi_namespace_node *device_node; + struct acpi_gpe_notify_object *notify_object; acpi_cpu_flags flags; + u8 gpe_dispatch_mask; ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); @@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, goto unlock_and_exit; } + if (wake_device == ACPI_ROOT_OBJECT) { + goto out; + } + /* * If there is no method or handler for this GPE, then the * wake_device will be notified whenever this GPE fires (aka * "implicit notify") Note: The GPE is assumed to be * level-triggered (for windows compatibility). */ - if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == - ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { + gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; + if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE + && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { + goto out; + } - /* Validate wake_device is of type Device */ + /* Validate wake_device is of type Device */ - device_node = ACPI_CAST_PTR(struct acpi_namespace_node, - wake_device); - if (device_node->type != ACPI_TYPE_DEVICE) { - goto unlock_and_exit; - } + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); + if (device_node->type != ACPI_TYPE_DEVICE) { + goto unlock_and_exit; + } + + if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED); - gpe_event_info->dispatch.device_node = device_node; + gpe_event_info->dispatch.device.node = device_node; + gpe_event_info->dispatch.device.next = NULL; + } else { + /* There are multiple devices to notify implicitly. 
*/ + + notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); + if (!notify_object) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + notify_object->node = device_node; + notify_object->next = gpe_event_info->dispatch.device.next; + gpe_event_info->dispatch.device.next = notify_object; } + out: gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; status = AE_OK; diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 5df67f1d6c61..384f7abcff77 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c @@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, size_t count, loff_t *ppos) { static char *buf; - static int uncopied_bytes; + static u32 max_size; + static u32 uncopied_bytes; + struct acpi_table_header table; acpi_status status; @@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, if (copy_from_user(&table, user_buf, sizeof(struct acpi_table_header))) return -EFAULT; - uncopied_bytes = table.length; - buf = kzalloc(uncopied_bytes, GFP_KERNEL); + uncopied_bytes = max_size = table.length; + buf = kzalloc(max_size, GFP_KERNEL); if (!buf) return -ENOMEM; } - if (uncopied_bytes < count) { - kfree(buf); + if (buf == NULL) + return -EINVAL; + + if ((*ppos > max_size) || + (*ppos + count > max_size) || + (*ppos + count < count) || + (count > uncopied_bytes)) return -EINVAL; - } if (copy_from_user(buf + (*ppos), user_buf, count)) { kfree(buf); + buf = NULL; return -EFAULT; } @@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, if (!uncopied_bytes) { status = acpi_install_method(buf); kfree(buf); + buf = NULL; if (ACPI_FAILURE(status)) return -EINVAL; add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 73fb1c4f4cd4..25ef1a4556e6 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc) } skb = alloc_skb(sizeof(*header), GFP_ATOMIC); - if (!skb && net_ratelimit()) { - dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); + if (!skb) { + if (net_ratelimit()) + dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b9ba04fc2b34..77fc76f8aea9 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, struct block_device *bdev = opened_bdev[cnt]; if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) continue; - __invalidate_device(bdev); + __invalidate_device(bdev, true); } mutex_unlock(&open_lock); } else { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 49e6a545eb63..dbf31ec9114d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -78,7 +78,6 @@ #include <asm/uaccess.h> -static DEFINE_MUTEX(loop_mutex); static LIST_HEAD(loop_devices); static DEFINE_MUTEX(loop_devices_mutex); @@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo = bdev->bd_disk->private_data; - mutex_lock(&loop_mutex); mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); - mutex_unlock(&loop_mutex); return 0; } @@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) struct loop_device *lo = disk->private_data; int err; - mutex_lock(&loop_mutex); mutex_lock(&lo->lo_ctl_mutex); if 
(--lo->lo_refcnt) @@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) out: mutex_unlock(&lo->lo_ctl_mutex); out_unlocked: - mutex_unlock(&loop_mutex); return 0; } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a126e614601f..6dcd55a74c0a 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03F0, 0x311D) }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE02C) }, { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1da773f899a2..700a3840fddc 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work) if (hdev->conn_hash.sco_num > 0) { if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { - err = usb_autopm_get_interface(data->isoc); + err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); @@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work) __set_isoc_interface(hdev, 0); if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) - usb_autopm_put_interface(data->isoc); + usb_autopm_put_interface(data->isoc ? 
data->isoc : data->intf); } } @@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf, usb_set_intfdata(intf, data); - usb_enable_autosuspend(interface_to_usbdev(intf)); - return 0; } diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 9252e85706ef..780498d76581 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -773,18 +773,23 @@ int __init agp_amd64_init(void) #else printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); #endif + pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* First check that we have at least one AMD64 NB */ - if (!pci_dev_present(amd_nb_misc_ids)) + if (!pci_dev_present(amd_nb_misc_ids)) { + pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; + } /* Look for any AGP bridge */ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; err = driver_attach(&agp_amd64_pci_driver.driver); - if (err == 0 && agp_bridges_found == 0) + if (err == 0 && agp_bridges_found == 0) { + pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; + } } return err; } diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c195bfeade11..5feebe2800e9 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -130,6 +130,7 @@ #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) #define I915_IFPADDR 0x60 +#define I830_HIC 0x70 /* Intel 965G registers */ #define I965_MSAC 0x62 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fab3d3265adb..0d09b537bb9a 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> +#include <linux/delay.h> #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" @@ -70,12 +71,8 @@ static struct _intel_private { u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; - union { - void __iomem *i9xx_flush_page; - void *i8xx_flush_page; - }; + void __iomem *i9xx_flush_page; char *i81x_gtt_table; - struct page *i8xx_page; struct resource ifp_resource; int resource_valid; struct page *scratch_page; @@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void) static void i830_cleanup(void) { - if (intel_private.i8xx_flush_page) { - kunmap(intel_private.i8xx_flush_page); - intel_private.i8xx_flush_page = NULL; - } - - __free_page(intel_private.i8xx_page); - intel_private.i8xx_page = NULL; -} - -static void intel_i830_setup_flush(void) -{ - /* return if we've already set the flush mechanism up */ - if (intel_private.i8xx_page) - return; - - intel_private.i8xx_page = alloc_page(GFP_KERNEL); - if (!intel_private.i8xx_page) - return; - - intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); - if (!intel_private.i8xx_flush_page) - i830_cleanup(); } /* The chipset_flush interface needs to get data that has already been @@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void) */ static void i830_chipset_flush(void) { - unsigned int *pg = intel_private.i8xx_flush_page; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + /* Forcibly evict everything from the CPU write buffers. + * clflush appears to be insufficient. + */ + wbinvd_on_all_cpus(); + + /* Now we've only seen documents for this magic bit on 855GM, + * we hope it exists for the other gen2 chipsets... + * + * Also works as advertised on my 845G. 
+ */ + writel(readl(intel_private.registers+I830_HIC) | (1<<31), + intel_private.registers+I830_HIC); - memset(pg, 0, 1024); + while (readl(intel_private.registers+I830_HIC) & (1<<31)) { + if (time_after(jiffies, timeout)) + break; - if (cpu_has_clflush) - clflush_cache_range(pg, 1024); - else if (wbinvd_on_all_cpus() != 0) - printk(KERN_ERR "Timed out waiting for cache flush.\n"); + udelay(50); + } } static void i830_write_entry(dma_addr_t addr, unsigned int entry, @@ -849,8 +837,6 @@ static int i830_setup(void) intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; - intel_i830_setup_flush(); - return 0; } diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 7855f9f45b8e..62787e30d508 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -900,6 +900,14 @@ static void sender(void *send_info, printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif + /* + * last_timeout_jiffies is updated here to avoid + * smi_timeout() handler passing very large time_diff + * value to smi_event_handler() that causes + * the send command to abort. + */ + smi_info->last_timeout_jiffies = jiffies; + mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); if (smi_info->thread) diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 777181a2e603..bcbbc71febb7 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -830,8 +830,7 @@ static void monitor_card(unsigned long p) test_bit(IS_ANY_T1, &dev->flags))) { DEBUGP(4, dev, "Perform AUTOPPS\n"); set_bit(IS_AUTOPPS_ACT, &dev->flags); - ptsreq.protocol = ptsreq.protocol = - (0x01 << dev->proto); + ptsreq.protocol = (0x01 << dev->proto); ptsreq.flags = 0x01; ptsreq.pts1 = 0x00; ptsreq.pts2 = 0x00; diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 94b8eb4d691d..444155a305ae 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c @@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data) static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) { struct ipw_dev *ipw = priv_data; - struct resource *io_resource; int ret; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; @@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) if (ret) return ret; - io_resource = request_region(p_dev->resource[0]->start, - resource_size(p_dev->resource[0]), - IPWIRELESS_PCCARD_NAME); + if (!request_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit; + } p_dev->resource[2]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; @@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); if (ret != 0) - goto exit2; + goto exit1; ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; - ipw->attr_memory = ioremap(p_dev->resource[2]->start, + ipw->common_memory = ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); - request_mem_region(p_dev->resource[2]->start, - resource_size(p_dev->resource[2]), - IPWIRELESS_PCCARD_NAME); + if (!request_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit2; + } p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; p_dev->resource[3]->end = 0; /* 
this used to be 0x1000 */ ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); if (ret != 0) - goto exit2; + goto exit3; ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); if (ret != 0) @@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ipw->attr_memory = ioremap(p_dev->resource[3]->start, resource_size(p_dev->resource[3])); - request_mem_region(p_dev->resource[3]->start, - resource_size(p_dev->resource[3]), - IPWIRELESS_PCCARD_NAME); + if (!request_mem_region(p_dev->resource[3]->start, + resource_size(p_dev->resource[3]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit4; + } return 0; +exit4: + iounmap(ipw->attr_memory); exit3: + release_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); exit2: - if (ipw->common_memory) { - release_mem_region(p_dev->resource[2]->start, - resource_size(p_dev->resource[2])); - iounmap(ipw->common_memory); - } + iounmap(ipw->common_memory); exit1: - release_resource(io_resource); + release_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0])); +exit: pcmcia_disable_device(p_dev); - return -1; + return ret; } static int config_ipwireless(struct ipw_dev *ipw) @@ -219,6 +229,8 @@ exit: static void release_ipwireless(struct ipw_dev *ipw) { + release_region(ipw->link->resource[0]->start, + resource_size(ipw->link->resource[0])); if (ipw->common_memory) { release_mem_region(ipw->link->resource[2]->start, resource_size(ipw->link->resource[2])); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index faf5a2c65926..1f46f1cd9225 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; - if (duration_idx != TPM_UNDEFINED) { + if (duration_idx != TPM_UNDEFINED) duration = chip->vendor.duration[duration_idx]; - /* if duration is 0, it's because chip->vendor.duration wasn't */ - /* filled yet, so we set the lowest timeout just to give enough */ - /* time for tpm_get_timeouts() to succeed */ - return (duration <= 0 ? 
HZ : duration); - } else + if (duration <= 0) return 2 * 60 * HZ; + else + return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); @@ -577,11 +575,9 @@ duration: if (rc) return; - if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || - be32_to_cpu(tpm_cmd.header.out.length) - != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) + if (be32_to_cpu(tpm_cmd.header.out.return_code) + != 3 * sizeof(u32)) return; - duration_cap = &tpm_cmd.params.getcap_out.cap.duration; chip->vendor.duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); @@ -941,18 +937,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev, } EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); -ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - - return sprintf(buf, "%d %d %d\n", - jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), - jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), - jiffies_to_usecs(chip->vendor.duration[TPM_LONG])); -} -EXPORT_SYMBOL_GPL(tpm_show_timeouts); - ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index d84ff772c26f..72ddb031b69a 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_temp_deactivated(struct device *, struct device_attribute *attr, char *); -extern ssize_t tpm_show_timeouts(struct device *, - struct device_attribute *attr, char *); struct tpm_chip; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 0d1d38e5f266..dd21df55689d 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); -static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); static struct attribute *tis_attrs[] = { &dev_attr_pubek.attr, @@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = { &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, - &dev_attr_cancel.attr, - &dev_attr_timeouts.attr, NULL, + &dev_attr_cancel.attr, NULL, }; static struct attribute_group tis_attr_grp = { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 490393186338..84b164d1eb2b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -388,6 +388,10 @@ static void discard_port_data(struct port *port) unsigned int len; int ret; + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. */ + return; + } vq = port->in_vq; if (port->inbuf) buf = port->inbuf; @@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port) void *buf; unsigned int len; + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. 
*/ + return; + } while ((buf = virtqueue_get_buf(port->out_vq, &len))) { kfree(buf); port->outvq_full = false; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1109f6848a43..5cb4d09919d6 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) ret = sysdev_driver_register(&cpu_sysdev_class, &cpufreq_sysdev_driver); + if (ret) + goto err_null_driver; - if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { + if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { int i; ret = -ENODEV; @@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (ret) { dprintk("no CPU initialized for driver %s\n", driver_data->name); - sysdev_driver_unregister(&cpu_sysdev_class, - &cpufreq_sysdev_driver); - - spin_lock_irqsave(&cpufreq_driver_lock, flags); - cpufreq_driver = NULL; - spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + goto err_sysdev_unreg; } } - if (!ret) { - register_hotcpu_notifier(&cpufreq_cpu_notifier); - dprintk("driver %s up and running\n", driver_data->name); - cpufreq_debug_enable_ratelimit(); - } + register_hotcpu_notifier(&cpufreq_cpu_notifier); + dprintk("driver %s up and running\n", driver_data->name); + cpufreq_debug_enable_ratelimit(); + return 0; +err_sysdev_unreg: + sysdev_driver_unregister(&cpu_sysdev_class, + &cpufreq_sysdev_driver); +err_null_driver: + spin_lock_irqsave(&cpufreq_driver_lock, flags); + cpufreq_driver = NULL; + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c index cead8e6ff345..7f6f01a4b145 100644 --- a/drivers/gpio/ml_ioh_gpio.c +++ b/drivers/gpio/ml_ioh_gpio.c @@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = { { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, { 0, } }; +MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id); static struct pci_driver ioh_gpio_driver = { .name = "ml_ioh_gpio", diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c index 0eba0a75c804..2c6af8705103 100644 --- a/drivers/gpio/pch_gpio.c +++ b/drivers/gpio/pch_gpio.c @@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, { 0, } }; +MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id); static struct pci_driver pch_gpio_driver = { .name = "pch_gpio", diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6977a1ce9d98..f73ef4390db6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; - int i, rc = 0; + int i, j, rc = 0; int start; for (i = 0; i < fb_helper->crtc_count; i++) { @@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) transp = cmap->transp; start = cmap->start; - for (i = 0; i < cmap->len; i++) { + for (j = 0; j < cmap->len; j++) { u16 hred, hgreen, hblue, htransp = 0xffff; hred = *red++; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3dadfa2a8528..28d1d3c24d65 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * available. In that case we can't account for this and just * hope for the best. 
*/ - if ((vblrc > 0) && (abs(diff_ns) > 1000000)) + if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { atomic_inc(&dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); + } /* Invalidate all timestamps while vblank irq's are off. */ clear_vblank_timestamps(dev, crtc); @@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) /* Dot clock in Hz: */ dotclock = (u64) crtc->hwmode.clock * 1000; + /* Fields of interlaced scanout modes are only halve a frame duration. + * Double the dotclock to get halve the frame-/line-/pixelduration. + */ + if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) + dotclock *= 2; + /* Valid dotclock? */ if (dotclock > 0) { /* Convert scanline length in pixels and video dot clock to @@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, return -EAGAIN; } - /* Don't know yet how to handle interlaced or - * double scan modes. Just no-op for now. - */ - if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); - return -ENOTSUPP; - } - /* Get current scanout position with system timestamp. * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times * if single query takes longer than max_error nanoseconds. @@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) if (rc) { tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; vblanktimestamp(dev, crtc, tslot) = t_vblank; - smp_wmb(); } + smp_mb__before_atomic_inc(); atomic_add(diff, &dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); } /** @@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - int crtc, ret = 0; + int ret = 0; + unsigned int crtc; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) @@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) * e.g., due to spurious vblank interrupts. We need to * ignore those for accounting. */ - if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { + if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { /* Store new timestamp in ringbuffer. */ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; - smp_wmb(); /* Increment cooked vblank count. This also atomically commits * the timestamp computed above. */ + smp_mb__before_atomic_inc(); atomic_inc(&dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); } else { DRM_DEBUG("crtc %d: Redundant vblirq ignored. 
diff_ns = %d\n", crtc, (int) diff_ns); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3601466c5502..4ff9b6cc973f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) int max_freq; /* RPSTAT1 is in the GT power well */ - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); @@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", max_freq * 100); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } else { seq_printf(m, "no P-state info available\n"); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 17bd766f2081..e33d9be7df3b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_GEN2(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + mmio_bar = IS_GEN2(dev) ? 1 : 0; dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); if (!dev_priv->regs) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0ad533f06af9..22ec066adae6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); +unsigned int i915_semaphores = 0; +module_param_named(semaphores, i915_semaphores, int, 0600); + unsigned int i915_enable_rc6 = 0; module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); @@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev) } } -void __gen6_force_wake_get(struct drm_i915_private *dev_priv) +void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; @@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) udelay(10); } -void __gen6_force_wake_put(struct drm_i915_private *dev_priv) +void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); POSTING_READ(FORCEWAKE); } +void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int loop = 500; + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + while (fifo < 20 && loop--) { + udelay(10); + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + } +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 65dfe81d0035..456f40484838 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; extern unsigned int 
i915_powersave; +extern unsigned int i915_semaphores; extern unsigned int i915_lvds_downclock; extern unsigned int i915_panel_use_ssc; extern unsigned int i915_enable_rc6; @@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device *dev, struct drm_file *file); +uint32_t +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); + /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); @@ -1353,22 +1357,32 @@ __i915_write(64, q) * must be set to prevent GT core from power down and stale values being * returned. */ -void __gen6_force_wake_get(struct drm_i915_private *dev_priv); -void __gen6_force_wake_put (struct drm_i915_private *dev_priv); -static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) +void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); +void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); +void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); + +static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val; if (dev_priv->info->gen >= 6) { - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); val = I915_READ(reg); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } else val = I915_READ(reg); return val; } +static inline void i915_gt_write(struct drm_i915_private *dev_priv, + u32 reg, u32 val) +{ + if (dev_priv->info->gen >= 6) + __gen6_gt_wait_for_fifo(dev_priv); + I915_WRITE(reg, val); +} + static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cf4f74c7c6fb..36e66cc5225e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) * Return the required GTT alignment for an object, only taking into account * unfenced tiled surface requirements. 
*/ -static uint32_t +uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d2f445e825f2..50ab1614571c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, if (from == NULL || to == from) return 0; - /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ - if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) + /* XXX gpu semaphores are implicated in various hard hangs on SNB */ + if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) return i915_gem_object_wait_rendering(obj, true); idx = intel_ring_sync_index(from, to); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 22a32b9932c5..d64843e18df2 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); - obj->tiling_changed = true; - obj->tiling_mode = args->tiling_mode; - obj->stride = args->stride; + /* Rebind if we need a change of alignment */ + if (!obj->map_and_fenceable) { + u32 unfenced_alignment = + i915_gem_get_unfenced_gtt_alignment(obj); + if (obj->gtt_offset & (unfenced_alignment - 1)) + ret = i915_gem_object_unbind(obj); + } + + if (ret == 0) { + obj->tiling_changed = true; + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; + } } + /* we have to maintain this existing ABI... 
*/ + args->stride = obj->stride; + args->tiling_mode = obj->tiling_mode; drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } /** diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 97f946dcc1aa..8a9e08bf1cf7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work) struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); @@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) } else { hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; - hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; - I915_WRITE(FDI_RXA_IMR, 0); - I915_WRITE(FDI_RXB_IMR, 0); + hotplug_mask |= SDE_AUX_MASK; } dev_priv->pch_irq_mask = ~hotplug_mask; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 15d94c63918c..2abe240dae58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3271,6 +3271,8 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_ACK 0x130090 +#define GT_FIFO_FREE_ENTRIES 0x120008 + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b006536b3d2..49fb54fd9a18 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) u32 blt_ecoskpd; /* Make sure blitter notifies FBC of writes */ - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << GEN6_BLITTER_LOCK_SHIFT; @@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) GEN6_BLITTER_LOCK_SHIFT); I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); POSTING_READ(GEN6_BLITTER_ECOSKPD); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) @@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, + atomic_read(&dev_priv->mm.wedged) || atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. 
*/ ret = i915_gem_object_flush_gpu(obj, false); - if (ret) { - i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + (void) ret; } ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, @@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) atomic_read(&obj->pending_flip) == 0); } +static bool intel_crtc_driving_pch(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + + /* + * If there's a non-PCH eDP on this crtc, it must be DP_A, and that + * must be driven by its own crtc; no sharing is possible. + */ + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + if (encoder->base.crtc != crtc) + continue; + + switch (encoder->type) { + case INTEL_OUTPUT_EDP: + if (!intel_encoder_is_pch_edp(&encoder->base)) + return false; + continue; + } + } + + return true; +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp; + bool is_pch_port = false; if (intel_crtc->active) return; @@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); } - ironlake_fdi_enable(crtc); + is_pch_port = intel_crtc_driving_pch(crtc); + + if (is_pch_port) + ironlake_fdi_enable(crtc); + else { + /* disable CPU FDI tx and PCH FDI rx */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_ENABLE); + POSTING_READ(reg); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(0x7 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(100); + + /* Ironlake workaround, disable clock pointer after downing FDI */ + if (HAS_PCH_IBX(dev)) + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); + + /* still set train pattern 1 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + /* BPC in FDI rx is consistent with that in PIPECONF */ + temp &= ~(0x07 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(100); + } /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && @@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_flush_display_plane(dev, plane); } + /* Skip the PCH stuff if possible */ + if (!is_pch_port) + goto done; + /* For PCH output, training FDI link */ if (IS_GEN6(dev)) gen6_fdi_link_train(crtc); @@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(reg, temp | TRANS_ENABLE); if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) DRM_ERROR("failed to enable transcoder %d\n", pipe); - +done: intel_crtc_load_lut(crtc); intel_update_fbc(dev); intel_crtc_update_cursor(crtc, true); @@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) * userspace... 
*/ I915_WRITE(GEN6_RC_STATE, 0); - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } void intel_enable_clock_gating(struct drm_device *dev) @@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev) POSTING_READ(RSTDBYCTL); } - ironlake_disable_rc6(dev); + ironlake_teardown_rc6(dev); } static int ironlake_setup_rc6(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c65992df458d..f8f86e57df22 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) val &= ~1; pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); val *= lbpc; - val >>= 1; } } @@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) if (is_backlight_combination_mode(dev)){ u32 max = intel_panel_get_max_backlight(dev); - u8 lpbc; + u8 lbpc; - lpbc = level * 0xfe / max + 1; - level /= lpbc; - pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); + lbpc = level * 0xfe / max + 1; + level /= lbpc; + pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); } tmp = I915_READ(BLC_PWM_CTL); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6d6fde85a636..34306865a5df 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -14,22 +14,23 @@ struct intel_hw_status_page { struct drm_i915_gem_object *obj; }; -#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) +#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) +#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) -#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) +#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) -#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) +#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) -#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) +#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) -#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) +#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) -#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) +#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 49e5e99917e2..6bdab891c64e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, entry->tvconf.has_component_output = false; break; case OUTPUT_LVDS: - if ((conn & 0x00003f00) != 0x10) + if ((conn & 0x00003f00) >> 8 != 0x10) entry->lvdsconf.use_straps_for_mode = true; entry->lvdsconf.use_power_scripts = true; break; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a7fae26f4654..a52184007f5f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) DRM_ERROR("bo %p still attached to GEM object\n", bo); nv10_mem_put_tile_region(dev, nvbo->tile, NULL); - nouveau_vm_put(&nvbo->vma); + if (nvbo->vma.node) { + nouveau_vm_unmap(&nvbo->vma); + nouveau_vm_put(&nvbo->vma); + } kfree(nvbo); } @@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } } + nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; @@ -166,17 +170,17 @@ static void set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; if (dev_priv->card_type == NV_10 && - nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { + nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && + nvbo->bo.mem.num_pages < vram_pages / 2) { /* * Make sure that the color and depth buffers are handled * by independent memory controller units. Up to a 9x * speed up when alpha-blending and depth-test are enabled * at the same time. */ - int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; - if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { nvbo->placement.fpfn = vram_pages / 2; nvbo->placement.lpfn = ~0; @@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); if (ret) goto out; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a21e00076839..390d82c3c4b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector) int high_w = 0, high_h = 0, high_v = 0; list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { + mode->vrefresh = drm_mode_vrefresh(mode); if (helper->mode_valid(connector, mode) != MODE_OK || (mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 65699bfaaaea..b368ed74aad7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan) return ret; /* NV_MEMORY_TO_MEMORY_FORMAT 
requires a notifier object */ - ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, + &chan->m2mf_ntfy); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9821fcacc3d2..982d70b12722 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager; extern int nouveau_notifier_init_channel(struct nouveau_channel *); extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, - int cout, uint32_t *offset); + int cout, uint32_t start, uint32_t end, + uint32_t *offset); extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, struct drm_file *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..b0fb9bdcddb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, mem->page_alignment << PAGE_SHIFT, size_nc, (nvbo->tile_flags >> 8) & 0xff, &node); - if (ret) - return ret; + if (ret) { + mem->mm_node = NULL; + return (ret == -ENOSPC) ? 0 : ret; + } node->page_shift = 12; if (nvbo->vma.node) diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 8844b50c3e54..7609756b6faf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c @@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, return 0; } - return -ENOMEM; + return -ENOSPC; } int diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index fe29d604b820..5ea167623a82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, int nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, - int size, uint32_t *b_offset) + int size, uint32_t start, uint32_t end, + uint32_t *b_offset) { struct drm_device *dev = chan->dev; struct nouveau_gpuobj *nobj = NULL; @@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, uint32_t offset; int target, ret; - mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); + mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, + start, end, 0); if (mem) - mem = drm_mm_get_block(mem, size, 0); + mem = drm_mm_get_block_range(mem, size, 0, start, end); if (!mem) { NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); return -ENOMEM; @@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, if (IS_ERR(chan)) return PTR_ERR(chan); - ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); + ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, + &na->offset); nouveau_channel_put(&chan); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index f05c0cddfeca..4399e2f34db4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device 
*dev) struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_level *perflvl; - if (pm->cur == &pm->boot) + if (!pm->cur || pm->cur == &pm->boot) return; perflvl = pm->cur; diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index ef23550407b5..c82db37d9f41 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, if (nv_encoder->dcb->type == OUTPUT_LVDS) { bool duallink, dummy; - nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> - clock, &duallink, &dummy); + nouveau_bios_parse_lvds_table(dev, output_mode->clock, + &duallink, &dummy); if (duallink) regp->fp_control |= (8 << 28); } else @@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) return; if (nv_encoder->dcb->lvdsconf.use_power_scripts) { - struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); - /* when removing an output, crtc may not be set, but PANEL_OFF * must still be run */ @@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) nv04_dfp_get_bound_head(dev, nv_encoder->dcb); if (mode == DRM_MODE_DPMS_ON) { - if (!nv_connector->native_mode) { - NV_ERROR(dev, "Not turning on LVDS without native mode\n"); - return; - } call_lvds_script(dev, nv_encoder->dcb, head, - LVDS_PANEL_ON, nv_connector->native_mode->clock); + LVDS_PANEL_ON, nv_encoder->mode.clock); } else /* pxclk of 0 is fine for PANEL_OFF, and for a * disconnected LVDS encoder there is no native_mode diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 8870d72388c8..18d30c2c1aa6 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { + case 0x40: + case 0x41: /* guess */ + case 0x42: + case 0x43: + case 0x45: /* guess */ + case 0x4e: + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); + break; case 0x44: case 0x4a: - case 0x4e: nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); break; - case 0x46: case 0x47: case 0x49: case 0x4b: + case 0x4c: + case 0x67: + default: nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); @@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); break; - - default: - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); - nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); - nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); - nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); - nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); - break; } } @@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev) break; default: switch (dev_priv->chipset) { - case 0x46: - case 0x47: - case 0x49: - case 0x4b: - nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); 
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); - break; - default: + case 0x41: + case 0x42: + case 0x43: + case 0x45: + case 0x4e: + case 0x44: + case 0x4a: nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); break; + default: + nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); + nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); + break; } nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..e57caa2a00e3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) void nv50_instmem_flush(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + spin_lock(&dev_priv->ramin_lock); nv_wr32(dev, 0x00330c, 0x00000001); if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); + spin_unlock(&dev_priv->ramin_lock); } void nv84_instmem_flush(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + spin_lock(&dev_priv->ramin_lock); nv_wr32(dev, 0x070000, 0x00000001); if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); + spin_unlock(&dev_priv->ramin_lock); } diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 459ff08241e5..6144156f255a 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm) void nv50_vm_flush_engine(struct drm_device *dev, int engine) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + spin_lock(&dev_priv->ramin_lock); nv_wr32(dev, 0x100c80, (engine << 16) | 1); if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); + spin_unlock(&dev_priv->ramin_lock); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 095bc507fb16..a4e5e53e0a62 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* use recommended ref_div for ss */ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (ss_enabled) { if (ss->refdiv) { + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = ss->refdiv; if (ASIC_IS_AVIVO(rdev)) @@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, index, (uint32_t *)&args); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = args.v3.sOutput.ucRefDiv; } if (args.v3.sOutput.ucPostDiv) { + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_POST_DIV; pll->post_div = args.v3.sOutput.ucPostDiv; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d270b3ff896b..6140ea1de45a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; } rdev->mc.visible_vram_size = 
rdev->mc.aper_size; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r700_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev) /* XXX: ontario has problems blitting to gart at the moment */ if (rdev->family == CHIP_PALM) { rdev->asic->copy = NULL; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); } /* allocate wb buffer */ diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2adfb03f479b..2be698e78ff2 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -623,7 +623,7 @@ done: dev_err(rdev->dev, "(%d) pin blit object failed\n", r); return r; } - rdev->mc.active_vram_size = rdev->mc.real_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } @@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev) { int r; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); if (rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 56deae5bf02e..e372f9e1e5ce 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520); void r100_pre_page_flip(struct radeon_device *rdev, int crtc) { - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; - u32 tmp; - - /* make sure flip is at vb rather than hb */ - tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); - tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; - /* make sure pending bit is asserted */ - tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; - WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); - - /* set pageflip to happen as late as possible in the vblank interval. 
- * same field for crtc1/2 - */ - tmp = RREG32(RADEON_CRTC_GEN_CNTL); - tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; - WREG32(RADEON_CRTC_GEN_CNTL, tmp); - /* enable the pflip int */ radeon_irq_kms_pflip_irq_get(rdev, crtc); } @@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) return r; } rdev->cp.ready = true; - rdev->mc.active_vram_size = rdev->mc.real_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } @@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev) void r100_cp_disable(struct radeon_device *rdev) { /* Disable ring */ - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); rdev->cp.ready = false; WREG32(RADEON_CP_CSQ_MODE, 0); WREG32(RADEON_CP_CSQ_CNTL, 0); @@ -2329,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) /* FIXME we don't use the second aperture yet when we could use it */ if (rdev->mc.visible_vram_size > rdev->mc.aper_size) rdev->mc.visible_vram_size = rdev->mc.aper_size; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); if (rdev->flags & RADEON_IS_IGP) { uint32_t tom; @@ -3490,7 +3472,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track track->num_texture = 16; track->maxy = 4096; track->separate_cube = 0; - track->aaresolve = true; + track->aaresolve = false; track->aa.robj = NULL; } @@ -3801,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev) r100_mc_program(rdev); /* Resume clock */ r100_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) */ -// r100_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r100_enable_bm(rdev); diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 768c60ee4ab6..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_X16: + case R300_TX_FORMAT_FL_I16: case R300_TX_FORMAT_Y8X8: case R300_TX_FORMAT_Z5Y6X5: case R300_TX_FORMAT_Z6Y5X5: @@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_Y16X16: + case R300_TX_FORMAT_FL_I16A16: case R300_TX_FORMAT_Z11Y11X10: case R300_TX_FORMAT_Z10Y11X11: case R300_TX_FORMAT_W8Z8Y8X8: diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index de88624d5f87..9b3fad23b76c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r600_vram_gtt_location(rdev, &rdev->mc); if (rdev->flags & RADEON_IS_IGP) { @@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) */ void r600_cp_stop(struct radeon_device *rdev) { - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); WREG32(SCRATCH_UMSK, 0); } diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c 
b/drivers/gpu/drm/radeon/r600_blit_kms.c index 41f7aafc97c4..df68d91e8190 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -558,7 +558,7 @@ done: dev_err(rdev->dev, "(%d) pin blit object failed\n", r); return r; } - rdev->mc.active_vram_size = rdev->mc.real_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } @@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev) { int r; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); if (rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 56c48b67ef3d..6b3429495118 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -345,7 +345,6 @@ struct radeon_mc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; - u64 active_vram_size; u64 gtt_size; u64 gtt_start; u64 gtt_end; @@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev); extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); +extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ extern bool r600_card_posted(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e75d63b8e21d..793c5e6026ad 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = { .pm_finish = &evergreen_pm_finish, .pm_init_profile = &rs780_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &evergreen_pre_page_flip, + .page_flip = &evergreen_page_flip, + .post_page_flip = &evergreen_post_page_flip, }; static struct radeon_asic btc_asic = { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0e657095de7c..3e7e7f9eb781 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { + for (post_div = max_post_div; post_div >= min_post_div; --post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 66324b5bb5ba..cc44bdfec80f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, u32 tiling_flags = 0; int ret; int aligned_size, size; + int height = mode_cmd->height; /* need to align pitch with crtc limits */ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); - size = mode_cmd->pitch * mode_cmd->height; + if (rdev->family >= CHIP_R600) + height = ALIGN(mode_cmd->height, 8); + size = mode_cmd->pitch * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, 
aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index df95eb83dac6..1fe95dfe48c9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; + struct ttm_mem_type_manager *man; + + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; args->vram_size = rdev->mc.real_vram_size; - args->vram_visible = rdev->mc.real_vram_size; + args->vram_visible = (u64)man->size << PAGE_SHIFT; if (rdev->stollen_vga_memory) args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); args->vram_visible -= radeon_fbdev_total_size(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cf0638c3b7c7..78968b738e88 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, (target_fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; - + crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | @@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, gen_cntl_val = RREG32(gen_cntl_reg); gen_cntl_val &= ~(0xf << 8); gen_cntl_val |= (format << 8); + gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK; WREG32(gen_cntl_reg, gen_cntl_val); crtc_offset = (u32)base; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e5b2cf10cbf4..8389b4c63d12 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev) DRM_INFO("radeon: ttm finalized\n"); } +/* this should only be called at bootup or when userspace + * isn't running */ +void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) +{ + struct ttm_mem_type_manager *man; + + if (!rdev->mman.initialized) + return; + + man = &rdev->mman.bdev.man[TTM_PL_VRAM]; + /* this just adjusts TTM size idea, which sets lpfn to the correct value */ + man->size = size >> PAGE_SHIFT; +} + static struct vm_operations_struct radeon_ttm_vm_ops; static const struct vm_operations_struct *ttm_vm_ops = NULL; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5afe294ed51f..8af4679db23e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.visible_vram_size = rdev->mc.aper_size; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); base = RREG32_MC(R_000004_MC_FB_LOCATION); base = G_000004_MC_FB_START(base) << 16; diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 6638c8e4c81b..66c949b7c18c 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.visible_vram_size = rdev->mc.aper_size; - rdev->mc.active_vram_size = 
rdev->mc.visible_vram_size; base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index d8ba67690656..714ad45757d0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev) */ void r700_cp_stop(struct radeon_device *rdev) { - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); WREG32(SCRATCH_UMSK, 0); } @@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; - rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r700_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 773e484f1646..297bc9a7d6e6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -238,13 +238,13 @@ config SENSORS_K8TEMP will be called k8temp. config SENSORS_K10TEMP - tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" + tristate "AMD Family 10h/11h/12h/14h temperature sensor" depends on X86 && PCI help If you say yes here you get support for the temperature sensor(s) inside your CPU. Supported are later revisions of - the AMD Family 10h and all revisions of the AMD Family 11h - microarchitectures. + the AMD Family 10h and all revisions of the AMD Family 11h, + 12h (Llano), and 14h (Brazos) microarchitectures. This driver can also be built as a module. If so, the module will be called k10temp. @@ -455,13 +455,14 @@ config SENSORS_JZ4740 called jz4740-hwmon. config SENSORS_JC42 - tristate "JEDEC JC42.4 compliant temperature sensors" + tristate "JEDEC JC42.4 compliant memory module temperature sensors" depends on I2C help - If you say yes here you get support for Jedec JC42.4 compliant - temperature sensors. Support will include, but not be limited to, - ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, - MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. + If you say yes here, you get support for JEDEC JC42.4 compliant + temperature sensors, which are used on many DDR3 memory modules for + mobile devices and servers. Support will include, but not be limited + to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, + MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. This driver can also be built as a module. If so, the module will be called jc42. @@ -574,7 +575,7 @@ config SENSORS_LM85 help If you say yes here you get support for National Semiconductor LM85 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, - EMC6D101 and EMC6D102. + EMC6D101, EMC6D102, and EMC6D103. This driver can also be built as a module. If so, the module will be called lm85. 
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c index 86d822aa9bbf..d46c0c758ddf 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c @@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = { { "ad7414", 0 }, {} }; +MODULE_DEVICE_TABLE(i2c, ad7414_id); static struct i2c_driver ad7414_driver = { .driver = { diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index f13c843a2964..5cc3e3784b42 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = { { "adt7411", 0 }, { } }; +MODULE_DEVICE_TABLE(i2c, adt7411_id); static struct i2c_driver adt7411_driver = { .driver = { diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 3f49dd376f02..6e06019015a5 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -37,7 +37,7 @@ #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ -#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */ +#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ @@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev) int nr_fans = (data->type == f71882fg) ? 4 : 3; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); - platform_set_drvdata(pdev, NULL); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); @@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev) } } + platform_set_drvdata(pdev, NULL); kfree(data); return 0; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 340fc78c8dde..934991237061 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = { /* Configuration register defines */ #define JC42_CFG_CRIT_ONLY (1 << 2) +#define JC42_CFG_TCRIT_LOCK (1 << 6) +#define JC42_CFG_EVENT_LOCK (1 << 7) #define JC42_CFG_SHUTDOWN (1 << 8) #define JC42_CFG_HYST_SHIFT 9 #define JC42_CFG_HYST_MASK 0x03 @@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct jc42_data *data = i2c_get_clientdata(client); - long val; + unsigned long val; int diff, hyst; int err; int ret = count; @@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev, static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL); -static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, set_temp_crit); -static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, set_temp_min); -static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, set_temp_max); -static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, set_temp_crit_hyst); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_max_hyst, NULL); @@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = { NULL }; +static mode_t jc42_attribute_mode(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + unsigned int config = data->config; + bool readonly; 
+ + if (attr == &dev_attr_temp1_crit.attr) + readonly = config & JC42_CFG_TCRIT_LOCK; + else if (attr == &dev_attr_temp1_min.attr || + attr == &dev_attr_temp1_max.attr) + readonly = config & JC42_CFG_EVENT_LOCK; + else if (attr == &dev_attr_temp1_crit_hyst.attr) + readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK); + else + readonly = true; + + return S_IRUGO | (readonly ? 0 : S_IWUSR); +} + static const struct attribute_group jc42_group = { .attrs = jc42_attributes, + .is_visible = jc42_attribute_mode, }; /* Return 0 if detection is successful, -ENODEV otherwise */ diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index da5a2404cd3e..82bf65aa2968 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,5 +1,5 @@ /* - * k10temp.c - AMD Family 10h/11h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> * @@ -25,7 +25,7 @@ #include <linux/pci.h> #include <asm/processor.h> -MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); +MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL"); @@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 1e229847f37a..d2cc28660816 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; enum chips { any_chip, lm85b, lm85c, adm1027, adt7463, adt7468, - emc6d100, emc6d102 + emc6d100, emc6d102, emc6d103 }; /* The LM85 registers */ @@ -90,6 +90,9 @@ enum chips { #define LM85_VERSTEP_EMC6D100_A0 0x60 #define LM85_VERSTEP_EMC6D100_A1 0x61 #define LM85_VERSTEP_EMC6D102 0x65 +#define LM85_VERSTEP_EMC6D103_A0 0x68 +#define LM85_VERSTEP_EMC6D103_A1 0x69 +#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */ #define LM85_REG_CONFIG 0x40 @@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = { { "emc6d100", emc6d100 }, { "emc6d101", emc6d100 }, { "emc6d102", emc6d102 }, + { "emc6d103", emc6d103 }, { } }; MODULE_DEVICE_TABLE(i2c, lm85_id); @@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) case LM85_VERSTEP_EMC6D102: type_name = "emc6d102"; break; + case LM85_VERSTEP_EMC6D103_A0: + case LM85_VERSTEP_EMC6D103_A1: + type_name = "emc6d103"; + break; + /* + * Registers apparently missing in EMC6D103S/EMC6D103:A2 + * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 + * (according to the data sheets), but used unconditionally + * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. + * So skip EMC6D103S for now. 
+ case LM85_VERSTEP_EMC6D103S: + type_name = "emc6d103s"; + break; + */ } } else { dev_dbg(&adapter->dev, @@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client, case adt7468: case emc6d100: case emc6d102: + case emc6d103: data->freq_map = adm1027_freq_map; break; default: @@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) /* More alarm bits */ data->alarms |= lm85_read_value(client, EMC6D100_REG_ALARM3) << 16; - } else if (data->type == emc6d102) { + } else if (data->type == emc6d102 || data->type == emc6d103) { /* Have to read LSB bits after the MSB ones because the reading of the MSB bits has frozen the LSBs (backward from the ADM1027). diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 2e067dd2ee51..50ea1f43bdc1 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -29,6 +29,7 @@ #include <linux/pci.h> #include <linux/mutex.h> #include <linux/ktime.h> +#include <linux/slab.h> #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index ef3bcb1ce864..61653f079671 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = { static int ocores_i2c_of_probe(struct platform_device* pdev, struct ocores_i2c* i2c) { - __be32* val; + const __be32* val; val = of_get_property(pdev->dev.of_node, "regstep", NULL); if (!val) { diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b605ff3a1fa0..58a58c7eaa17 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) * REVISIT: Some wkup sources might not be needed. 
*/ dev->westate = OMAP_I2C_WE_ALL; - if (dev->rev < OMAP_I2C_REV_ON_4430) - omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, - dev->westate); + omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); } } omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); @@ -847,11 +845,15 @@ complete: dev_err(dev->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; } + /* + * ProDB0017052: Clear ARDY bit twice + */ if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | - OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); + OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | + OMAP_I2C_STAT_ARDY)); omap_i2c_complete_cmd(dev, err); return IRQ_HANDLED; } @@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_SUSPEND +static int omap_i2c_suspend(struct device *dev) +{ + if (!pm_runtime_suspended(dev)) + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) + dev->bus->pm->runtime_suspend(dev); + + return 0; +} + +static int omap_i2c_resume(struct device *dev) +{ + if (!pm_runtime_suspended(dev)) + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) + dev->bus->pm->runtime_resume(dev); + + return 0; +} + +static struct dev_pm_ops omap_i2c_pm_ops = { + .suspend = omap_i2c_suspend, + .resume = omap_i2c_resume, +}; +#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) +#else +#define OMAP_I2C_PM_OPS NULL +#endif + static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", .owner = THIS_MODULE, + .pm = OMAP_I2C_PM_OPS, }, }; diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 495be451d326..266135ddf7fa 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev) adap->owner = THIS_MODULE; /* DDC class but actually often used for more generic I2C */ adap->class = I2C_CLASS_DDC; - strncpy(adap->name, "ST Microelectronics DDC I2C adapter", + strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", sizeof(adap->name)); adap->nr = bus_nr; adap->algo = &stu300_algo; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 1fa091e05690..4a5c4a44ffb1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -62,6 +62,7 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <asm/mwait.h> +#include <asm/msr.h> #define INTEL_IDLE_VERSION "0.4" #define PREFIX "intel_idle: " @@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); static struct cpuidle_state *cpuidle_state_table; /* + * Hardware C-state auto-demotion may not always be optimal. + * Indicate which enable bits to clear here. + */ +static unsigned long long auto_demotion_disable_flags; + +/* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. 
* If this flag is set, SW flushes the TLB, so even if the @@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = { .notifier_call = setup_broadcast_cpuhp_notify, }; +static void auto_demotion_disable(void *dummy) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); + msr_bits &= ~auto_demotion_disable_flags; + wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); +} + /* * intel_idle_probe() */ @@ -324,11 +340,17 @@ static int intel_idle_probe(void) case 0x25: /* Westmere */ case 0x2C: /* Westmere */ cpuidle_state_table = nehalem_cstates; + auto_demotion_disable_flags = + (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); break; case 0x1C: /* 28 - Atom Processor */ + cpuidle_state_table = atom_cstates; + break; + case 0x26: /* 38 - Lincroft Atom Processor */ cpuidle_state_table = atom_cstates; + auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; break; case 0x2A: /* SNB */ @@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void) return -EIO; } } + if (auto_demotion_disable_flags) + smp_call_function(auto_demotion_disable, NULL, 1); return 0; } diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b606fd64022..08c194861af5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 0) { - nesdev->iw_status = 1; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 1) { - nesdev->iw_status = 0; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work) netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 0) { - nesdev->iw_status = 1; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work) netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 1) { - nesdev->iw_status = 0; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 8245237b67ce..eca0c41f1226 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) * there are still requests that haven't been acked. 
*/ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && - !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) + !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && + (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { @@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, } spin_lock_irqsave(&qp->s_lock, flags); + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto ack_done; /* Ignore invalid responses. */ if (qib_cmp24(psn, qp->s_next_psn) >= 0) diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 23cf8fc933ec..5b8f59d6c3e8 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner, event->owner = owner; list_add_tail(&event->node, &gameport_event_list); - schedule_work(&gameport_event_work); + queue_work(system_long_wq, &gameport_event_work); out: spin_unlock_irqrestore(&gameport_event_lock, flags); diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index ac471b77c18e..99ce9032d08c 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -71,8 +71,9 @@ struct tegra_kbc { spinlock_t lock; unsigned int repoll_dly; unsigned long cp_dly_jiffies; + bool use_fn_map; const struct tegra_kbc_platform_data *pdata; - unsigned short keycode[KBC_MAX_KEY]; + unsigned short keycode[KBC_MAX_KEY * 2]; unsigned short current_keys[KBC_MAX_KPENT]; unsigned int num_pressed_keys; struct timer_list timer; @@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = { KEY(15, 5, KEY_F2), KEY(15, 6, KEY_CAPSLOCK), KEY(15, 7, KEY_F6), + + /* Software Handled Function Keys */ + KEY(20, 0, KEY_KP7), + + KEY(21, 0, KEY_KP9), + KEY(21, 1, KEY_KP8), + KEY(21, 2, KEY_KP4), + KEY(21, 4, KEY_KP1), + + KEY(22, 1, KEY_KPSLASH), + KEY(22, 2, KEY_KP6), + KEY(22, 3, KEY_KP5), + KEY(22, 4, KEY_KP3), + KEY(22, 5, KEY_KP2), + KEY(22, 7, KEY_KP0), + + KEY(27, 1, KEY_KPASTERISK), + KEY(27, 3, KEY_KPMINUS), + KEY(27, 4, KEY_KPPLUS), + KEY(27, 5, KEY_KPDOT), + + KEY(28, 5, KEY_VOLUMEUP), + + KEY(29, 3, KEY_HOME), + KEY(29, 4, KEY_END), + KEY(29, 5, KEY_BRIGHTNESSDOWN), + KEY(29, 6, KEY_VOLUMEDOWN), + KEY(29, 7, KEY_BRIGHTNESSUP), + + KEY(30, 0, KEY_NUMLOCK), + KEY(30, 1, KEY_SCROLLLOCK), + KEY(30, 2, KEY_MUTE), + + KEY(31, 4, KEY_HELP), }; static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { @@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) unsigned int i; unsigned int num_down = 0; unsigned long flags; + bool fn_keypress = false; spin_lock_irqsave(&kbc->lock, flags); for (i = 0; i < KBC_MAX_KPENT; i++) { @@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); scancodes[num_down] = scancode; - keycodes[num_down++] = kbc->keycode[scancode]; + keycodes[num_down] = kbc->keycode[scancode]; + /* If driver uses Fn map, do not report the Fn key. */ + if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map) + fn_keypress = true; + else + num_down++; } val >>= 8; } + + /* + * If the platform uses Fn keymaps, translate keys on a Fn keypress. + * Function keycodes are KBC_MAX_KEY apart from the plain keycodes. 
+ */ + if (fn_keypress) { + for (i = 0; i < num_down; i++) { + scancodes[i] += KBC_MAX_KEY; + keycodes[i] = kbc->keycode[scancodes[i]]; + } + } + spin_unlock_irqrestore(&kbc->lock, flags); tegra_kbc_report_released_keys(kbc->idev, @@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) input_dev->keycode = kbc->keycode; input_dev->keycodesize = sizeof(kbc->keycode[0]); - input_dev->keycodemax = ARRAY_SIZE(kbc->keycode); + input_dev->keycodemax = KBC_MAX_KEY; + if (pdata->use_fn_map) + input_dev->keycodemax *= 2; + kbc->use_fn_map = pdata->use_fn_map; keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, input_dev->keycode, input_dev->keybit); diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 25e5d042a72c..7453938bf5ef 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -51,6 +51,29 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) + +/* + * The following describes response for the 0x0c query. + * + * byte mask name meaning + * ---- ---- ------- ------------ + * 1 0x01 adjustable threshold capacitive button sensitivity + * can be adjusted + * 1 0x02 report max query 0x0d gives max coord reported + * 1 0x04 clearpad sensor is ClearPad product + * 1 0x08 advanced gesture not particularly meaningful + * 1 0x10 clickpad bit 0 1-button ClickPad + * 1 0x60 multifinger mode identifies firmware finger counting + * (not reporting!) algorithm. + * Not particularly meaningful + * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered + * 2 0x01 clickpad bit 1 2-button ClickPad + * 2 0x02 deluxe LED controls touchpad support LED commands + * ala multimedia control bar + * 2 0x04 reduced filtering firmware does less filtering on + * position data, driver should watch + * for noise. + */ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 7c38d1fbabf2..ba70058e2be3 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner, event->owner = owner; list_add_tail(&event->node, &serio_event_list); - schedule_work(&serio_event_work); + queue_work(system_long_wq, &serio_event_work); out: spin_unlock_irqrestore(&serio_event_lock, flags); diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c index 18f8798442fa..7bd5baa547be 100644 --- a/drivers/isdn/hardware/eicon/istream.c +++ b/drivers/isdn/hardware/eicon/istream.c @@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a, stream interface. If synchronous service was requested, then function does return amount of data written to stream. 
- 'final' does indicate that pice of data to be written is + 'final' does indicate that piece of data to be written is final part of frame (necessary only by structured datatransfer) return 0 if zero lengh packet was written return -1 if stream is full diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 0858791978d8..cfff0c41d298 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c @@ -1247,10 +1247,10 @@ static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; - struct sk_buff *skb, *oskb; + struct sk_buff *skb; struct Layer2 *l2 = &st->l2; u_char header[MAX_HEADER_LEN]; - int i; + int i, hdr_space_needed; int unsigned p1; u_long flags; @@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) if (!skb) return; + hdr_space_needed = l2headersize(l2, 0); + if (hdr_space_needed > skb_headroom(skb)) { + struct sk_buff *orig_skb = skb; + + skb = skb_realloc_headroom(skb, hdr_space_needed); + if (!skb) { + dev_kfree_skb(orig_skb); + return; + } + } spin_lock_irqsave(&l2->lock, flags); if(test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; @@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) l2->vs = (l2->vs + 1) % 8; } spin_unlock_irqrestore(&l2->lock, flags); - p1 = skb->data - skb->head; - if (p1 >= i) - memcpy(skb_push(skb, i), header, i); - else { - printk(KERN_WARNING - "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); - oskb = skb; - skb = alloc_skb(oskb->len + i, GFP_ATOMIC); - memcpy(skb_put(skb, i), header, i); - skb_copy_from_linear_data(oskb, - skb_put(skb, oskb->len), oskb->len); - dev_kfree_skb(oskb); - } + memcpy(skb_push(skb, i), header, i); st->l2.l2l1(st, PH_PULL | INDICATION, skb); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 8a2f767f26d8..0ed7f6bc2a7f 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev) if (md_check_no_bitmap(mddev)) return -EINVAL; - mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = linear_conf(mddev, mddev->raid_disks); if (!conf) diff --git a/drivers/md/md.c b/drivers/md/md.c index 0cc30ecda4c1..818313e277e7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit) { mddev_t *mddev, *new = NULL; + if (unit && MAJOR(unit) != MD_MAJOR) + unit &= ~((1<<MdpMinorShift)-1); + retry: spin_lock(&all_mddevs_lock); @@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) } mddev->array_sectors = sectors; - set_capacity(mddev->gendisk, mddev->array_sectors); - if (mddev->pers) + if (mddev->pers) { + set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); - + } return len; } @@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev) } set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); out: return err; @@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev) mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; mddev->in_sync = 0; + mddev->changed = 0; mddev->degraded = 0; mddev->safemode = 0; mddev->bitmap_info.offset = 0; @@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) set_capacity(disk, 0); mutex_unlock(&mddev->open_mutex); + 
mddev->changed = 1; revalidate_disk(disk); if (mddev->ro) @@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) atomic_inc(&mddev->openers); mutex_unlock(&mddev->open_mutex); - check_disk_size_change(mddev->gendisk, bdev); + check_disk_change(bdev); out: return err; } @@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) return 0; } + +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} static const struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) diff --git a/drivers/md/md.h b/drivers/md/md.h index 7e90b8593b2a..12215d437fcc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -274,6 +274,8 @@ struct mddev_s atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ + int changed; /* True if we might need to + * reread partition info */ int degraded; /* whether md should consider * adding a spare */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 6d7ddf32ef2e..3a62d440e27b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev) * bookkeeping area. [whatever we allocate in multipath_run(), * should be freed in multipath_stop()] */ - mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); mddev->private = conf; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 637a96855edb..c0ac457f1218 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev) if (md_check_no_bitmap(mddev)) return -EINVAL; blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); - mddev->queue->queue_lock = &mddev->queue->__queue_lock; /* if private is not null, we are here after takeover */ if (mddev->private == NULL) { @@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev) mddev->new_layout = 0; mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ mddev->delta_disks = 1 - mddev->raid_disks; + mddev->raid_disks = 1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a23ffa397ba9..06cd712807d0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf) if (conf->pending_bio_list.head) { struct bio *bio; bio = bio_list_get(&conf->pending_bio_list); + /* Only take the spinlock to quiet a warning */ + spin_lock(conf->mddev->queue->queue_lock); blk_remove_plug(conf->mddev->queue); + spin_unlock(conf->mddev->queue->queue_lock); spin_unlock_irq(&conf->device_lock); /* flush any pending bitmap writes to * disk before proceeding w/ I/O */ @@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_inc(&r1_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); - blk_plug_device(mddev->queue); + blk_plug_device_unlocked(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); } r1_bio_write_done(r1_bio, bio->bi_vcnt, 
behind_pages, behind_pages != NULL); @@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); - mddev->queue->queue_lock = &conf->device_lock; list_for_each_entry(rdev, &mddev->disks, same_set) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3b607b28741b..747d061d8e05 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf) if (conf->pending_bio_list.head) { struct bio *bio; bio = bio_list_get(&conf->pending_bio_list); + /* Spinlock only taken to quiet a warning */ + spin_lock(conf->mddev->queue->queue_lock); blk_remove_plug(conf->mddev->queue); + spin_unlock(conf->mddev->queue->queue_lock); spin_unlock_irq(&conf->device_lock); /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ @@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_inc(&r10_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); - blk_plug_device(mddev->queue); + blk_plug_device_unlocked(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); } @@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev) if (!conf) goto out; - mddev->queue->queue_lock = &conf->device_lock; - mddev->thread = conf->thread; conf->thread = NULL; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 702812824195..78536fdbd87f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev) mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_fn = raid5_congested; - mddev->queue->queue_lock = &conf->device_lock; mddev->queue->unplug_fn = raid5_unplug_queue; chunk_size = mddev->chunk_sectors << 9; diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c index bc6a67768af1..8c4852114eeb 100644 --- a/drivers/media/common/tuners/tda8290.c +++ b/drivers/media/common/tuners/tda8290.c @@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props) #define TDA8290_ID 0x89 u8 reg = 0x1f, id; struct i2c_msg msg_read[] = { - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, }; /* detect tda8290 */ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", __func__, reg); return -ENODEV; } @@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props) #define TDA8295C2_ID 0x8b u8 reg = 0x2f, id; struct i2c_msg msg_read[] = { - { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, - { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, + { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, + { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, }; - /* detect tda8290 */ + /* detect tda8295 */ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { - printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", + printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", __func__, reg); return -ENODEV; } diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index defd83964ce2..193cdb77b76a 100644 ---
a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap) return 0; } +static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, + u16 pid, int onoff) +{ + struct dib0700_state *st = adapter->dev->priv; + if (st->is_dib7000pc) + return dib7000p_pid_filter(adapter->fe, index, pid, onoff); + return dib7000m_pid_filter(adapter->fe, index, pid, onoff); +} + +static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) +{ + struct dib0700_state *st = adapter->dev->priv; + if (st->is_dib7000pc) + return dib7000p_pid_filter_ctrl(adapter->fe, onoff); + return dib7000m_pid_filter_ctrl(adapter->fe, onoff); +} + static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { return dib7000p_pid_filter(adapter->fe, index, pid, onoff); @@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, - .pid_filter = stk70x0p_pid_filter, - .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, + .pid_filter = stk7700p_pid_filter, + .pid_filter_ctrl = stk7700p_pid_filter_ctrl, .frontend_attach = stk7700p_frontend_attach, .tuner_attach = stk7700p_tuner_attach, diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c index 9eea4188303b..46ccd01a7696 100644 --- a/drivers/media/dvb/dvb-usb/lmedm04.c +++ b/drivers/media/dvb/dvb-usb/lmedm04.c @@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev, } /* Default firmware for LME2510C */ -const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; +char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; static void lme_coldreset(struct usb_device *dev) { @@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .download_firmware = lme2510_download_firmware, - .firmware = lme_firmware, + .firmware = (const char *)&lme_firmware, .size_of_priv = sizeof(struct lme2510_state), .num_adapters = 1, .adapter = { @@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit); MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); -MODULE_VERSION("1.74"); +MODULE_VERSION("1.75"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c index c7f5ccf54aa5..289a79837f24 100644 --- a/drivers/media/dvb/frontends/dib7000m.c +++ b/drivers/media/dvb/frontends/dib7000m.c @@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di } EXPORT_SYMBOL(dib7000m_get_i2c_master); +int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) +{ + struct dib7000m_state *state = fe->demodulator_priv; + u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef; + val |= (onoff & 0x1) << 4; + dprintk("PID filter enabled %d", onoff); + return dib7000m_write_word(state, 294 + state->reg_offs, val); +} +EXPORT_SYMBOL(dib7000m_pid_filter_ctrl); + +int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) +{ + struct dib7000m_state *state = fe->demodulator_priv; + dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); + return dib7000m_write_word(state, 300 + state->reg_offs + id, + onoff ? 
(1 << 13) | pid : 0); +} +EXPORT_SYMBOL(dib7000m_pid_filter); + #if 0 /* used with some prototype boards */ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h index 113819ce9f0d..81fcf2241c64 100644 --- a/drivers/media/dvb/frontends/dib7000m.h +++ b/drivers/media/dvb/frontends/dib7000m.h @@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int); +extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff); +extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff); #else static inline struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, @@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod, printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; } +static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, + u16 pid, u8 onoff) +{ + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); + return -ENODEV; +} + +static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, + uint8_t onoff) +{ + printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); + return -ENODEV; +} #endif /* TODO diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c index 59feeb84aec7..10a432a79d00 100644 --- a/drivers/media/dvb/mantis/mantis_pci.c +++ b/drivers/media/dvb/mantis/mantis_pci.c @@ -22,7 +22,6 @@ #include <linux/moduleparam.h> #include <linux/kernel.h> #include <asm/io.h> -#include <asm/pgtable.h> #include <asm/page.h> #include <linux/kmod.h> #include <linux/vmalloc.h> diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c index 73230ff93b8a..01f258a2a57a 100644 --- a/drivers/media/rc/ir-raw.c +++ b/drivers/media/rc/ir-raw.c @@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type) { ktime_t now; s64 delta; /* ns */ - struct ir_raw_event ev; + DEFINE_IR_RAW_EVENT(ev); int rc = 0; if (!dev->raw) @@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type) * being called for the first time, note that delta can't * possibly be negative. 
*/ - ev.duration = 0; if (delta > IR_MAX_DURATION || !dev->raw->last_type) type |= IR_START_EVENT; else diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 6df0a4980645..e4f8eac7f717 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -148,6 +148,7 @@ enum mceusb_model_type { MCE_GEN2_TX_INV, POLARIS_EVK, CX_HYBRID_TV, + MULTIFUNCTION, }; struct mceusb_model { @@ -155,9 +156,10 @@ struct mceusb_model { u32 mce_gen2:1; u32 mce_gen3:1; u32 tx_mask_normal:1; - u32 is_polaris:1; u32 no_tx:1; + int ir_intfnum; + const char *rc_map; /* Allow specify a per-board map */ const char *name; /* per-board name */ }; @@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = { .tx_mask_normal = 1, }, [POLARIS_EVK] = { - .is_polaris = 1, /* * In fact, the EVK is shipped without * remotes, but we should have something handy, @@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = { .name = "Conexant Hybrid TV (cx231xx) MCE IR", }, [CX_HYBRID_TV] = { - .is_polaris = 1, .no_tx = 1, /* tx isn't wired up at all */ .name = "Conexant Hybrid TV (cx231xx) MCE IR", }, + [MULTIFUNCTION] = { + .mce_gen2 = 1, + .ir_intfnum = 2, + }, }; static struct usb_device_id mceusb_dev_table[] = { @@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = { { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, - /* Realtek MCE IR Receiver */ - { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, + /* Realtek MCE IR Receiver and card reader */ + { USB_DEVICE(VENDOR_REALTEK, 0x0161), + .driver_info = MULTIFUNCTION }, /* SMK/Toshiba G83C0004D410 */ { USB_DEVICE(VENDOR_SMK, 0x031d), .driver_info = MCE_GEN2_TX_INV }, @@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf, bool is_gen3; bool is_microsoft_gen1; bool tx_mask_normal; - bool is_polaris; + int ir_intfnum; dev_dbg(&intf->dev, "%s called\n", __func__); @@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf, is_gen3 = mceusb_model[model].mce_gen3; is_microsoft_gen1 = mceusb_model[model].mce_gen1; tx_mask_normal = mceusb_model[model].tx_mask_normal; - is_polaris = mceusb_model[model].is_polaris; + ir_intfnum = mceusb_model[model].ir_intfnum; - if (is_polaris) { - /* Interface 0 is IR */ - if (idesc->desc.bInterfaceNumber) - return -ENODEV; - } + /* There are multi-function devices with non-IR interfaces */ + if (idesc->desc.bInterfaceNumber != ir_intfnum) + return -ENODEV; /* step through the endpoints to find first bulk in and out endpoint */ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 273d9d674792..d4d64492a057 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt) static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) { - /* set number of bytes needed for wake key comparison (default 67) */ - nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); + /* set number of bytes needed for wake from s3 (default 65) */ + nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES, + CIR_WAKE_FIFO_CMP_DEEP); /* set tolerance/variance allowed per byte during wake compare */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 1df82351cb03..048135eea702 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ 
b/drivers/media/rc/nuvoton-cir.h @@ -305,8 +305,11 @@ struct nvt_dev { #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 -/* CIR Wake FIFO buffer is 67 bytes long */ -#define CIR_WAKE_FIFO_LEN 67 +/* + * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes + * the system comparing only 65 bytes (fails with this set to 67) + */ +#define CIR_WAKE_FIFO_CMP_BYTES 65 /* CIR Wake byte comparison tolerance */ #define CIR_WAKE_CMP_TOLERANCE 5 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 512a2f4ada0e..5b4422ef4e6d 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device, count++; } else { for (i = 0; i < ARRAY_SIZE(proto_names); i++) { - if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { + if (!strcasecmp(tmp, proto_names[i].name)) { tmp += strlen(proto_names[i].name); mask = proto_names[i].type; break; diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c index e41e4ad5cc40..9c475c600fc9 100644 --- a/drivers/media/video/au0828/au0828-video.c +++ b/drivers/media/video/au0828/au0828-video.c @@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv, if (rc < 0) return rc; - return videobuf_reqbufs(&fh->vb_vidq, rb); + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + rc = videobuf_reqbufs(&fh->vb_vidq, rb); + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) + rc = videobuf_reqbufs(&fh->vb_vbiq, rb); + + return rc; } static int vidioc_querybuf(struct file *file, void *priv, @@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv, if (rc < 0) return rc; - return videobuf_querybuf(&fh->vb_vidq, b); + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + rc = videobuf_querybuf(&fh->vb_vidq, b); + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) + rc = videobuf_querybuf(&fh->vb_vbiq, b); + + return rc; } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) @@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) if (rc < 0) return rc; - return videobuf_qbuf(&fh->vb_vidq, b); + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + rc = videobuf_qbuf(&fh->vb_vidq, b); + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) + rc = videobuf_qbuf(&fh->vb_vbiq, b); + + return rc; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) @@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) dev->greenscreen_detected = 0; } - return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); + if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); + else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) + rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); + + return rc; } static struct v4l2_file_operations au0828_v4l_fops = { diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c index 87177733cf92..68ad1963f421 100644 --- a/drivers/media/video/cx18/cx18-cards.c +++ b/drivers/media/video/cx18/cx18-cards.c @@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = { .i2c = &cx18_i2c_std, }; +static const struct cx18_card cx18_card_hvr1600_s5h1411 = { + .type = CX18_CARD_HVR_1600_S5H1411, + .name = "Hauppauge HVR-1600", + .comment = "Simultaneous Digital and Analog TV capture supported\n", + .v4l2_capabilities = 
CX18_CAP_ENCODER, + .hw_audio_ctrl = CX18_HW_418_AV, + .hw_muxer = CX18_HW_CS5345, + .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | + CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | + CX18_HW_Z8F0811_IR_HAUP, + .video_inputs = { + { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, + { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, + { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, + { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, + { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, + }, + .audio_inputs = { + { CX18_CARD_INPUT_AUD_TUNER, + CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, + { CX18_CARD_INPUT_LINE_IN1, + CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, + { CX18_CARD_INPUT_LINE_IN2, + CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, + }, + .radio_input = { CX18_CARD_INPUT_AUD_TUNER, + CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, + .ddr = { + /* ESMT M13S128324A-5B memory */ + .chip_config = 0x003, + .refresh = 0x30c, + .timing1 = 0x44220e82, + .timing2 = 0x08, + .tune_lane = 0, + .initial_emrs = 0, + }, + .gpio_init.initial_value = 0x3001, + .gpio_init.direction = 0x3001, + .gpio_i2c_slave_reset = { + .active_lo_mask = 0x3001, + .msecs_asserted = 10, + .msecs_recovery = 40, + .ir_reset_mask = 0x0001, + }, + .i2c = &cx18_i2c_std, +}; + static const struct cx18_card cx18_card_hvr1600_samsung = { .type = CX18_CARD_HVR_1600_SAMSUNG, .name = "Hauppauge HVR-1600 (Preproduction)", @@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = { &cx18_card_toshiba_qosmio_dvbt, &cx18_card_leadtek_pvr2100, &cx18_card_leadtek_dvr3100h, - &cx18_card_gotview_dvd3 + &cx18_card_gotview_dvd3, + &cx18_card_hvr1600_s5h1411 }; const struct cx18_card *cx18_get_card(u16 index) diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c index 944af8adbe0c..b1c3cbd92743 100644 --- a/drivers/media/video/cx18/cx18-driver.c +++ b/drivers/media/video/cx18/cx18-driver.c @@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype, "\t\t\t 7 = Leadtek WinFast PVR2100\n" "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" + "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n" "\t\t\t 0 = Autodetect (default)\n" "\t\t\t-1 = Ignore this card\n\t\t"); MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); @@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) switch (cx->card->type) { case CX18_CARD_HVR_1600_ESMT: case CX18_CARD_HVR_1600_SAMSUNG: + case CX18_CARD_HVR_1600_S5H1411: tveeprom_hauppauge_analog(&c, tv, eedata); break; case CX18_CARD_YUAN_MPC718: @@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx) from the model number. Use the cardtype module option if you have one of these preproduction models. */ switch (tv.model) { - case 74000 ... 
74999: + case 74301: /* Retail models */ + case 74321: + case 74351: /* OEM models */ + case 74361: + /* Digital side is s5h1411/tda18271 */ + cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411); + break; + case 74021: /* Retail models */ + case 74031: + case 74041: + case 74141: + case 74541: /* OEM models */ + case 74551: + case 74591: + case 74651: + case 74691: + case 74751: + case 74891: + /* Digital side is s5h1409/mxl5005s */ cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); break; case 0x718: @@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx) CX18_ERR("Invalid EEPROM\n"); return; default: - CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); + CX18_ERR("Unknown model %d, defaulting to original HVR-1600 " + "(cardtype=1)\n", tv.model); cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); break; } diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h index 306caac6d3fc..f736679d2517 100644 --- a/drivers/media/video/cx18/cx18-driver.h +++ b/drivers/media/video/cx18/cx18-driver.h @@ -85,7 +85,8 @@ #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ -#define CX18_CARD_LAST 8 +#define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/ +#define CX18_CARD_LAST 9 #define CX18_ENC_STREAM_TYPE_MPG 0 #define CX18_ENC_STREAM_TYPE_TS 1 diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c index f0381d62518d..f41922bd4020 100644 --- a/drivers/media/video/cx18/cx18-dvb.c +++ b/drivers/media/video/cx18/cx18-dvb.c @@ -29,6 +29,8 @@ #include "cx18-gpio.h" #include "s5h1409.h" #include "mxl5005s.h" +#include "s5h1411.h" +#include "tda18271.h" #include "zl10353.h" #include <linux/firmware.h> @@ -77,6 +79,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = { }; /* + * CX18_CARD_HVR_1600_S5H1411 + */ +static struct s5h1411_config hcw_s5h1411_config = { + .output_mode = S5H1411_SERIAL_OUTPUT, + .gpio = S5H1411_GPIO_OFF, + .vsb_if = S5H1411_IF_44000, + .qam_if = S5H1411_IF_4000, + .inversion = S5H1411_INVERSION_ON, + .status_mode = S5H1411_DEMODLOCKING, + .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, +}; + +static struct tda18271_std_map hauppauge_tda18271_std_map = { + .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, + .if_lvl = 6, .rfagc_top = 0x37 }, + .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, + .if_lvl = 6, .rfagc_top = 0x37 }, +}; + +static struct tda18271_config hauppauge_tda18271_config = { + .std_map = &hauppauge_tda18271_std_map, + .gate = TDA18271_GATE_DIGITAL, + .output_opt = TDA18271_OUTPUT_LT_OFF, +}; + +/* * CX18_CARD_LEADTEK_DVR3100H */ /* Information/confirmation of proper config values provided by Terry Wu */ @@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed) switch (cx->card->type) { case CX18_CARD_HVR_1600_ESMT: case CX18_CARD_HVR_1600_SAMSUNG: + case CX18_CARD_HVR_1600_S5H1411: v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); v |= 0x00400000; /* Serial Mode */ v |= 0x00002000; /* Data Length - Byte */ @@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream) ret = 0; } break; + case CX18_CARD_HVR_1600_S5H1411: + dvb->fe = dvb_attach(s5h1411_attach, + &hcw_s5h1411_config, + &cx->i2c_adap[0]); + if (dvb->fe != NULL) + dvb_attach(tda18271_attach, dvb->fe, + 0x60, &cx->i2c_adap[0], + &hauppauge_tda18271_config); + break; case 
CX18_CARD_LEADTEK_DVR3100H: dvb->fe = dvb_attach(zl10353_attach, &leadtek_dvr3100h_demod, diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c index ed3d8f55029b..307ff543c254 100644 --- a/drivers/media/video/cx23885/cx23885-i2c.c +++ b/drivers/media/video/cx23885/cx23885-i2c.c @@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap, if (!i2c_wait_done(i2c_adap)) goto eio; - if (!i2c_slave_did_ack(i2c_adap)) { - retval = -ENXIO; - goto err; - } if (i2c_debug) { printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); if (!(ctrl & I2C_NOSTOP)) @@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap, eio: retval = -EIO; - err: if (i2c_debug) printk(KERN_ERR " ERR: %d\n", retval); return retval; @@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap, if (!i2c_wait_done(i2c_adap)) goto eio; - if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) { - retval = -ENXIO; - goto err; - } msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; if (i2c_debug) { dprintk(1, " %02x", msg->buf[cnt]); @@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap, eio: retval = -EIO; - err: if (i2c_debug) printk(KERN_ERR " ERR: %d\n", retval); return retval; diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 6fc09dd41b9d..35796e035247 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c @@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client, kfree(state); return err; } - v4l2_ctrl_cluster(2, &state->volume); + if (!is_cx2583x(state)) + v4l2_ctrl_cluster(2, &state->volume); v4l2_ctrl_handler_setup(&state->hdl); if (client->dev.platform_data) { diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index 9b4faf009196..9c29e964d400 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv) static void ivtv_irq_dma_err(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; + u32 status; del_timer(&itv->dma_timer); + ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); + status = read_reg(IVTV_REG_DMASTATUS); IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], - read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); - write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); + status, itv->cur_dma_stream); + /* + * We do *not* write back to the IVTV_REG_DMASTATUS register to + * clear the error status, if either the encoder write (0x02) or + * decoder read (0x01) bus master DMA operation do not indicate + * completed. We can race with the DMA engine, which may have + * transitioned to completed status *after* we read the register. + * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the + * DMA engine has completed, will cause the DMA engine to stop working. 
+ */ + status &= 0x3; + if (status == 0x3) + write_reg(status, IVTV_REG_DMASTATUS); + if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; - /* retry */ - if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { + /* retry */ + /* + * FIXME - handle cases of DMA error similar to + * encoder below, except conditioned on status & 0x1 + */ ivtv_dma_dec_start(s); - else - ivtv_dma_enc_start(s); - return; + return; + } else { + if ((status & 0x2) == 0) { + /* + * CX2341x Bus Master DMA write is ongoing. + * Reset the timer and let it complete. + */ + itv->dma_timer.expires = + jiffies + msecs_to_jiffies(600); + add_timer(&itv->dma_timer); + return; + } + + if (itv->dma_retries < 3) { + /* + * CX2341x Bus Master DMA write has ended. + * Retry the write, starting with the first + * xfer segment. Just retrying the current + * segment is not sufficient. + */ + s->sg_processed = 0; + itv->dma_retries++; + ivtv_dma_enc_start_xfer(s); + return; + } + /* Too many retries, give up on this one */ + } + } if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { ivtv_udma_start(itv); diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index c179041d91f8..e7e717800ee2 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c @@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev) v4l2_m2m_release(dev->m2m_dev); del_timer_sync(&dev->timer); video_unregister_device(dev->vfd); - video_device_release(dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index b63f8cafa671..561909b65ce6 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c @@ -57,7 +57,7 @@ #include <linux/usb.h> #define S2255_MAJOR_VERSION 1 -#define S2255_MINOR_VERSION 20 +#define S2255_MINOR_VERSION 21 #define S2255_RELEASE 0 #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ S2255_MINOR_VERSION, \ @@ -312,9 +312,9 @@ struct s2255_fh { }; /* current cypress EEPROM firmware version */ -#define S2255_CUR_USB_FWVER ((3 << 8) | 6) +#define S2255_CUR_USB_FWVER ((3 << 8) | 11) /* current DSP FW version */ -#define S2255_CUR_DSP_FWVER 8 +#define S2255_CUR_DSP_FWVER 10102 /* Need DSP version 5+ for video status feature */ #define S2255_MIN_DSP_STATUS 5 #define S2255_MIN_DSP_COLORFILTER 8 @@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in, static void s2255_reset_dsppower(struct s2255_dev *dev) { - s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); + s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1); msleep(10); s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); + msleep(600); + s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1); return; } diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index e9a3eab7b0cf..8c1d85e27be4 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -621,7 +621,7 @@ static int __init memstick_init(void) { int rc; - workqueue = create_freezeable_workqueue("kmemstick"); + workqueue = create_freezable_workqueue("kmemstick"); if (!workqueue) return -ENOMEM; diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index f71f22948477..1735c84ff757 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ 
-76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.17" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" +#define MPT_LINUX_VERSION_COMMON "3.04.18" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index a3856ed90aef..e8deb8ed0499 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) } static int +mptctl_release(struct inode *inode, struct file *filep) +{ + fasync_helper(-1, filep, 0, &async_queue); + return 0; +} + +static int mptctl_fasync(int fd, struct file *filep, int mode) { MPT_ADAPTER *ioc; @@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = { .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, + .release = mptctl_release, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, #endif diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 59b8f53d1ece..0d9b82a44540 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) } out: - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", + ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, + SCpnt, SCpnt->serial_number); return retval; } @@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = SUCCESS; + retval = 0; goto out; } diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 6a1f94042612..c45e6305b26f 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) unsigned long flags; struct asic3 *asic; - desc->chip->ack(irq); + desc->irq_data.chip->irq_ack(&desc->irq_data); - asic = desc->handler_data; + asic = get_irq_data(irq); for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { u32 status; diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index 33c923d215c7..fdd8a1b8bc67 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c @@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev) /* Voice codec interface client */ cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; - cell->name = "davinci_vcif"; + cell->name = "davinci-vcif"; cell->driver_data = davinci_vc; /* Voice codec CQ93VC client */ cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; - cell->name = "cq93vc"; + cell->name = "cq93vc-codec"; cell->driver_data = davinci_vc; ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 627cf577b16d..e9018d1394ee 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client, static inline int __tps6586x_writes(struct i2c_client *client, int reg, int len, uint8_t *val) { - int ret; + int ret, i; - ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); - if (ret < 0) { - dev_err(&client->dev, "failed writings to 0x%02x\n", reg); - 
return ret; + for (i = 0; i < len; i++) { + ret = __tps6586x_write(client, reg + i, *(val + i)); + if (ret < 0) + return ret; } return 0; diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 000cb414a78a..92b85e28a15e 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) idev->close = ucb1x00_ts_close; __set_bit(EV_ABS, idev->evbit); - __set_bit(ABS_X, idev->absbit); - __set_bit(ABS_Y, idev->absbit); - __set_bit(ABS_PRESSURE, idev->absbit); input_set_drvdata(idev, ts); + ucb1x00_adc_enable(ts->ucb); + ts->x_res = ucb1x00_ts_read_xres(ts); + ts->y_res = ucb1x00_ts_read_yres(ts); + ucb1x00_adc_disable(ts->ucb); + + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); + err = input_register_device(idev); if (err) goto fail; diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 41233c7fa581..f4016a075fd6 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev) struct wm8994 *wm8994 = dev_get_drvdata(dev); int ret; + /* Don't actually go through with the suspend if the CODEC is + * still active (eg, for audio passthrough from CP. */ + ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1); + if (ret < 0) { + dev_err(dev, "Failed to read power status: %d\n", ret); + } else if (ret & WM8994_VMID_SEL_MASK) { + dev_dbg(dev, "CODEC still active, ignoring suspend\n"); + return 0; + } + /* GPIO configuration state is saved here since we may be configuring * the GPIO alternate functions even if we're not using the gpiolib * driver for them. @@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev) if (ret < 0) dev_err(dev, "Failed to save LDO registers: %d\n", ret); + wm8994->suspended = true; + ret = regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { @@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev) struct wm8994 *wm8994 = dev_get_drvdata(dev); int ret; + /* We may have lied to the PM core about suspending */ + if (!wm8994->suspended) + return 0; + ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { @@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev) if (ret < 0) dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); + wm8994->suspended = false; + return 0; } #endif diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 63ee4c1a5315..b6e1c9a6679e 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c @@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = { { "bmp085", 0 }, { } }; +MODULE_DEVICE_TABLE(i2c, bmp085_id); static struct i2c_driver bmp085_driver = { .driver = { diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 5f6852dff40b..44d4475a09dd 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -329,7 +329,7 @@ static int __init tifm_init(void) { int rc; - workqueue = create_freezeable_workqueue("tifm"); + workqueue = create_freezable_workqueue("tifm"); if (!workqueue) return -ENOMEM; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 4d2ea8e80140..6df5a55da110 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -785,7 +785,7 @@ static int __init vmballoon_init(void) if (x86_hyper != &x86_hyper_vmware) return -ENODEV; - vmballoon_wq = 
create_freezeable_workqueue("vmmemctl"); + vmballoon_wq = create_freezable_workqueue("vmmemctl"); if (!vmballoon_wq) { pr_err("failed to create workqueue\n"); return -ENOMEM; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 6625c057be05..150b5f3cd401 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work) * still present */ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead - && mmc_card_is_removable(host)) + && !(host->caps & MMC_CAP_NONREMOVABLE)) host->bus_ops->detect(host); /* diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5c4a54d9b6a4..ebc62ad4cc56 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host) */ mmc_release_host(host); err = mmc_add_card(host->card); - mmc_claim_host(host); if (err) goto remove_added; @@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host) goto remove_added; } + mmc_claim_host(host); return 0; remove_added: /* Remove without lock if the device has been added. */ - mmc_release_host(host); mmc_sdio_remove(host); mmc_claim_host(host); remove: diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index a8c3e1c9b02a..4aaa88f8ab5f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation( sleep_time = chip_op_time / 2; for (;;) { + if (chip->state != chip_state) { + /* Someone's suspended the operation: sleep */ + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&chip->wq, &wait); + mutex_unlock(&chip->mutex); + schedule(); + remove_wait_queue(&chip->wq, &wait); + mutex_lock(&chip->mutex); + continue; + } + status = map_read(map, cmd_adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; + if (chip->erase_suspended && chip_state == FL_ERASING) { + /* Erase suspend occured while sleep: reset timeout */ + timeo = reset_timeo; + chip->erase_suspended = 0; + } + if (chip->write_suspended && chip_state == FL_WRITING) { + /* Write suspend occured while sleep: reset timeout */ + timeo = reset_timeo; + chip->write_suspended = 0; + } if (!timeo) { map_write(map, CMD(0x70), cmd_adr); chip->state = FL_STATUS; @@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation( timeo--; } mutex_lock(&chip->mutex); - - while (chip->state != chip_state) { - /* Someone's suspended the operation: sleep */ - DECLARE_WAITQUEUE(wait, current); - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - mutex_unlock(&chip->mutex); - schedule(); - remove_wait_queue(&chip->wq, &wait); - mutex_lock(&chip->mutex); - } - if (chip->erase_suspended && chip_state == FL_ERASING) { - /* Erase suspend occured while sleep: reset timeout */ - timeo = reset_timeo; - chip->erase_suspended = 0; - } - if (chip->write_suspended && chip_state == FL_WRITING) { - /* Write suspend occured while sleep: reset timeout */ - timeo = reset_timeo; - chip->write_suspended = 0; - } } /* Done and happy. 
*/ diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index d72a5fb2d041..4e1be51cc122 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) } -static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) +static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index) { int i,num_erase_regions; uint8_t uaddr; - if (! (jedec_table[index].devtypes & p_cfi->device_type)) { + if (!(jedec_table[index].devtypes & cfi->device_type)) { DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", - jedec_table[index].name, 4 * (1<<p_cfi->device_type)); + jedec_table[index].name, 4 * (1<<cfi->device_type)); return 0; } @@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) num_erase_regions = jedec_table[index].nr_regions; - p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); - if (!p_cfi->cfiq) { + cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); + if (!cfi->cfiq) { //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); return 0; } - memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); + memset(cfi->cfiq, 0, sizeof(struct cfi_ident)); - p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; - p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; - p_cfi->cfiq->DevSize = jedec_table[index].dev_size; - p_cfi->cfi_mode = CFI_MODE_JEDEC; + cfi->cfiq->P_ID = jedec_table[index].cmd_set; + cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; + cfi->cfiq->DevSize = jedec_table[index].dev_size; + cfi->cfi_mode = CFI_MODE_JEDEC; + cfi->sector_erase_cmd = CMD(0x30); for (i=0; i<num_erase_regions; i++){ - p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; + cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; } - p_cfi->cmdset_priv = NULL; + cfi->cmdset_priv = NULL; /* This may be redundant for some cases, but it doesn't hurt */ - p_cfi->mfr = jedec_table[index].mfr_id; - p_cfi->id = jedec_table[index].dev_id; + cfi->mfr = jedec_table[index].mfr_id; + cfi->id = jedec_table[index].dev_id; uaddr = jedec_table[index].uaddr; @@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) our brains explode when we see the datasheets talking about address lines numbered from A-1 to A18. 
The CFI table has unlock addresses in device-words according to the mode the device is connected in */ - p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; - p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; + cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type; + cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type; return 1; /* ok */ } @@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", __func__, cfi->mfr, cfi->id, cfi->addr_unlock1, cfi->addr_unlock2 ); - if (!cfi_jedec_setup(cfi, i)) + if (!cfi_jedec_setup(map, cfi, i)) return 0; goto ok_out; } diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 77d64ce19e9f..92de7e3a49a5 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev, printk(KERN_ERR MOD_NAME " %s(): Unable to register resource %pR - kernel bug?\n", __func__, &window->rsrc); + return -EBUSY; } diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index cb20c67995d8..e0a2373bf0e2 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -413,7 +413,6 @@ error3: error2: list_del(&new->list); error1: - kfree(new); return ret; } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 15682ec8530e..28af71c61834 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void) module_init(omap_nand_init); module_exit(omap_nand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index d9d7efbc77cc..6322d1fb5d62 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) init_completion(&dev->dma_done); - dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); + dev->card_workqueue = create_freezable_workqueue(DRV_NAME); if (!dev->card_workqueue) goto error9; diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index e78914938c5c..ac08750748a3 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = { .remove = __devexit_p(generic_onenand_remove), }; -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); static int __init generic_onenand_init(void) { diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index ac31f461cc1c..c849cacf4b2f 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void) module_init(omap2_onenand_init); module_exit(omap2_onenand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 67822cf6c025..ac0d6a8613b5 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = { static __init int sm_module_init(void) { int error = 0; - cache_flush_workqueue = 
create_freezeable_workqueue("smflush"); + cache_flush_workqueue = create_freezable_workqueue("smflush"); if (IS_ERR(cache_flush_workqueue)) return PTR_ERR(cache_flush_workqueue); diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 39214e512452..7ca0eded2561 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data) int csr0, boguscnt; int handled = 0; - if (dev == NULL) { - printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n"); - return IRQ_NONE; - } - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 653c62475cb6..8849699c66c4 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -22,7 +22,7 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.62.00-5" +#define DRV_MODULE_VERSION "1.62.00-6" #define DRV_MODULE_RELDATE "2011/01/30" #define BNX2X_BC_VER 0x040200 @@ -1211,6 +1211,7 @@ struct bnx2x { /* DCBX Negotation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; + u32 pending_max; }; /** @@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 - -/* CMNG constants - derived from lab experiments, and not from system spec calculations !!! */ -#define DEF_MIN_RATE 100 -/* resolution of the rate shaping timer - 100 usec */ -#define RS_PERIODIC_TIMEOUT_USEC 100 -/* resolution of fairness algorithm in usecs - - coefficient for calculating the actual t fair */ -#define T_FAIR_COEF 10000000 +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 /* number of bytes in single QM arbitration cycle - - coefficient for calculating the fairness timer */ -#define QM_ARB_BYTES 40000 -#define FAIR_MEM 2 + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* Memory of fairness algorithm . 2 cycles */ +#define FAIR_MEM 2 #define ATTN_NIG_FOR_FUNC (1L << 8) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 710ce5d04c53..a71b32940533 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, #endif } +/* Timestamp option length allowed for TPA aggregation: + * + * nop nop kind length echo val + */ +#define TPA_TSTAMP_OPT_LEN 12 +/** + * Calculate the approximate value of the MSS for this + * aggregation using the first packet of it. + * + * @param bp + * @param parsing_flags Parsing flags from the START CQE + * @param len_on_bd Total length of the first packet for the + * aggregation. + */ +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, + u16 len_on_bd) +{ + /* TPA arrgregation won't have an IP options and TCP options + * other than timestamp. 
+ */ + u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); + + + /* Check if there was a TCP timestamp, if there is it's will + * always be 12 bytes length: nop nop kind length echo val. + * + * Otherwise FW would close the aggregation. + */ + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) + hdrs_len += TPA_TSTAMP_OPT_LEN; + + return len_on_bd - hdrs_len; +} + static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct sk_buff *skb, struct eth_fast_path_rx_cqe *fp_cqe, - u16 cqe_idx) + u16 cqe_idx, u16 parsing_flags) { struct sw_rx_page *rx_pg, old_rx_pg; u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); @@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* This is needed in order to enable forwarding support */ if (frag_size) - skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, - max(frag_size, (u32)len_on_bd)); + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, + len_on_bd); #ifdef BNX2X_STOP_ON_ERROR if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { @@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (likely(new_skb)) { /* fix ip xsum and give it to the stack */ /* (no need to map the new skb) */ + u16 parsing_flags = + le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); prefetch(skb); prefetch(((char *)(skb)) + L1_CACHE_BYTES); @@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, } if (!bnx2x_fill_frag_skb(bp, fp, skb, - &cqe->fast_path_cqe, cqe_idx)) { - if ((le16_to_cpu(cqe->fast_path_cqe. - pars_flags.flags) & PARSING_FLAGS_VLAN)) + &cqe->fast_path_cqe, cqe_idx, + parsing_flags)) { + if (parsing_flags & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, le16_to_cpu(cqe->fast_path_cqe. 
vlan_tag)); @@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) { u16 line_speed = bp->link_vars.line_speed; if (IS_MF(bp)) { - u16 maxCfg = (bp->mf_config[BP_VN(bp)] & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - /* Calculate the current MAX line speed limit for the DCC - * capable devices + u16 maxCfg = bnx2x_extract_max_cfg(bp, + bp->mf_config[BP_VN(bp)]); + + /* Calculate the current MAX line speed limit for the MF + * devices */ - if (IS_MF_SD(bp)) { + if (IS_MF_SI(bp)) + line_speed = (line_speed * maxCfg) / 100; + else { /* SD mode */ u16 vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) line_speed = vn_max_rate; - } else /* IS_MF_SI(bp)) */ - line_speed = (line_speed * maxCfg) / 100; + } } return line_speed; @@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp) bnx2x_free_rx_skbs(bp); } +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) +{ + /* load old values */ + u32 mf_cfg = bp->mf_config[BP_VN(bp)]; + + if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { + /* leave all but MAX value */ + mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; + + /* set new MAX value */ + mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) + & FUNC_MF_CFG_MAX_BW_MASK; + + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); + } +} + static void bnx2x_free_msix_irqs(struct bnx2x *bp) { int i, offset = 1; @@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_set_eth_mac(bp, 1); + if (bp->pending_max) { + bnx2x_update_max_mf_config(bp, bp->pending_max); + bp->pending_max = 0; + } + if (bp->port.pmf) bnx2x_initial_phy_init(bp, load_mode); diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 03eb4d68e6bb..85ea7f26b51f 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp); */ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); +/** + * Updates MAX part of MF configuration in HW + * (if required) + * + * @param bp + * @param value + */ +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); + /* dev_close main block */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); @@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, void bnx2x_acquire_phy_lock(struct bnx2x *bp); void bnx2x_release_phy_lock(struct bnx2x *bp); +/** + * Extracts MAX BW part from MF configuration. 
+ * + * @param bp + * @param mf_cfg + * + * @return u16 + */ +static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) +{ + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + BNX2X_ERR("Illegal configuration detected for Max BW - " + "using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; +} + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 5b44a8b48509..7e92f9d0dcfd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed |= (cmd->speed_hi << 16); if (IS_MF_SI(bp)) { - u32 param = 0; + u32 part; u32 line_speed = bp->link_vars.line_speed; /* use 10G if no link detected */ @@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) REQ_BC_VER_4_SET_MF_BW); return -EINVAL; } - if (line_speed < speed) { - BNX2X_DEV_INFO("New speed should be less or equal " - "to actual line speed\n"); + + part = (speed * 100) / line_speed; + + if (line_speed < speed || !part) { + BNX2X_DEV_INFO("Speed setting should be in a range " + "from 1%% to 100%% " + "of actual line speed\n"); return -EINVAL; } - /* load old values */ - param = bp->mf_config[BP_VN(bp)]; - - /* leave only MIN value */ - param &= FUNC_MF_CFG_MIN_BW_MASK; - /* set new MAX value */ - param |= (((speed * 100) / line_speed) - << FUNC_MF_CFG_MAX_BW_SHIFT) - & FUNC_MF_CFG_MAX_BW_MASK; + if (bp->state != BNX2X_STATE_OPEN) + /* store value for following "load" */ + bp->pending_max = part; + else + bnx2x_update_max_mf_config(bp, part); - bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); return 0; } @@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) { 0x100, 0x350 }, /* manuf_info */ { 0x450, 0xf0 }, /* feature_info */ { 0x640, 0x64 }, /* upgrade_key_info */ - { 0x6a4, 0x64 }, { 0x708, 0x70 }, /* manuf_key_info */ - { 0x778, 0x70 }, { 0, 0 } }; __be32 buf[0x350 / 4]; @@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev, buf[4] = 1; etest->flags |= ETH_TEST_FL_FAILED; } - if (bp->port.pmf) - if (bnx2x_link_test(bp, is_serdes) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + + if (bnx2x_link_test(bp, is_serdes) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } #ifdef BNX2X_EXTRA_DEBUG bnx2x_panic_dump(bp); diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index 5a268e9a0895..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h @@ -241,7 +241,7 @@ static const struct { /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't * want to handle "system kill" flow at the moment. 
*/ - BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index d584d32c747d..aa032339e321 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) vn_max_rate = 0; } else { + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); + vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If min rate is zero - set it to 1 */ + /* If fairness is enabled (not all min rates are zeroes) and + if current min rate is zero - set it to 1. + This is a requirement of the algorithm. */ if (bp->vn_weight_sum && (vn_min_rate == 0)) vn_min_rate = DEF_MIN_RATE; - vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; + + if (IS_MF_SI(bp)) + /* maxCfg in percents of linkspeed */ + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; + else + /* maxCfg is absolute in 100Mb units */ + vn_max_rate = maxCfg * 100; } DP(NETIF_MSG_IFUP, @@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) m_fair_vn.vn_credit_delta = max_t(u32, (vn_min_rate * (T_FAIR_COEF / (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold * 2)); + (bp->cmng.fair_vars.fair_threshold + + MIN_ABOVE_THRESH)); DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", m_fair_vn.vn_credit_delta); } @@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) bnx2x_calc_vn_weight_sum(bp); /* calculate and set min-max rate for each vn */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, vn); + if (bp->port.pmf) + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, vn); /* always enable rate shaping and fairness */ bp->cmng.flags.cmng_enables |= @@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - /* indicate link status only if link status actually changed */ - if (prev_link_status != bp->link_vars.link_status) - bnx2x_link_report(bp); - - if (IS_MF(bp)) - bnx2x_link_sync_notify(bp); - if (bp->link_vars.link_up && bp->link_vars.line_speed) { int cmng_fns = bnx2x_get_cmng_fns_mode(bp); @@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "single function mode without fairness\n"); } + + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + + /* indicate link status only if link status actually changed */ + if (prev_link_status != bp->link_vars.link_status) + bnx2x_link_report(bp); } void bnx2x__link_status_update(struct bnx2x *bp) diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index bda60d590fa8..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) if (unlikely(bp->panic)) return; + bnx2x_stats_stm[bp->stats_state][event].action(bp); + /* Protect a state change flow */ spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); - if ((event != STATS_EVENT_UPDATE) || 
netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", state, event, bp->stats_state); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 1024ae158227..a5d5d0b5b155 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port) } /** - * __get_rx_machine_lock - lock the port's RX machine + * __get_state_machine_lock - lock the port's state machines * @port: the port we're looking at * */ -static inline void __get_rx_machine_lock(struct port *port) +static inline void __get_state_machine_lock(struct port *port) { - spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** - * __release_rx_machine_lock - unlock the port's RX machine + * __release_state_machine_lock - unlock the port's state machines * @port: the port we're looking at * */ -static inline void __release_rx_machine_lock(struct port *port) +static inline void __release_state_machine_lock(struct port *port) { - spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } /** @@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port) } /** - * __initialize_port_locks - initialize a port's RX machine spinlock + * __initialize_port_locks - initialize a port's STATE machine spinlock * @port: the port we're looking at * */ static inline void __initialize_port_locks(struct port *port) { // make sure it isn't called twice - spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); + spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); } //conversions @@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) { rx_states_t last_state; - // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback) - __get_rx_machine_lock(port); - // keep current State Machine state to compare later if it was changed last_state = port->sm_rx_state; @@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) pr_err("%s: An illegal loopback occurred on adapter (%s).\n" "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", port->slave->dev->master->name, port->slave->dev->name); - __release_rx_machine_lock(port); return; } __update_selected(lacpdu, port); @@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) break; } } - __release_rx_machine_lock(port); } /** @@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) goto re_arm; } + /* Lock around state machines to protect data accessed + * by all (e.g., port->sm_vars). ad_rx_machine may run + * concurrently due to incoming LACPDU. 
+ */ + __get_state_machine_lock(port); + ad_rx_machine(NULL, port); ad_periodic_machine(port); ad_port_selection_logic(port); @@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) // turn off the BEGIN bit, since we already handled it if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; + + __release_state_machine_lock(port); } re_arm: @@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u case AD_TYPE_LACPDU: pr_debug("Received LACPDU on port %d\n", port->actor_port_number); + /* Protect against concurrent state machines */ + __get_state_machine_lock(port); ad_rx_machine(lacpdu, port); + __release_state_machine_lock(port); break; case AD_TYPE_MARKER: diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 2c46a154f2c6..b28baff70864 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -264,7 +264,8 @@ struct ad_bond_info { struct ad_slave_info { struct aggregator aggregator; // 802.3ad aggregator structure struct port port; // 802.3ad port structure - spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt + spinlock_t state_machine_lock; /* mutex state machines vs. + incoming LACPDU */ u16 id; }; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) goto open_unlock; } - priv->wq = create_freezeable_workqueue("mcp251x_wq"); + priv->wq = create_freezable_workqueue("mcp251x_wq"); INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd90..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -18,7 +18,7 @@ config CAN_SOFTING config CAN_SOFTING_CS tristate "Softing Gmbh CAN pcmcia cards" depends on PCMCIA - select CAN_SOFTING + depends on CAN_SOFTING ---help--- Support for PCMCIA cards from Softing Gmbh & some cards from Vector Gmbh. 
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 5157e15e96eb..aeea9f9ff6e8 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = { }; static const struct can_bittiming_const softing_btr_const = { + .name = "softing", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 7ff170cbc7dc..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; int kcqe_cnt; + /* status block index must be read before reading other fields */ + rmb(); cp->kwq_con_idx = *cp->kwq_con_idx_ptr; while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { @@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) barrier(); if (status_idx != *cp->kcq1.status_idx_ptr) { status_idx = (u16) *cp->kcq1.status_idx_ptr; + /* status block index must be read first */ + rmb(); cp->kwq_con_idx = *cp->kwq_con_idx_ptr; } else break; @@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) u32 last_status = *info->status_idx_ptr; int kcqe_cnt; + /* status block index must be read before reading the KCQ */ + rmb(); while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { service_kcqes(dev, kcqe_cnt); @@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) break; last_status = *info->status_idx_ptr; + /* status block index must be read before reading the KCQ */ + rmb(); } return last_status; } @@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) { struct cnic_dev *dev = (struct cnic_dev *) data; struct cnic_local *cp = dev->cnic_priv; - u32 status_idx; + u32 status_idx, new_status_idx; if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) return; - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); + while (1) { + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); - CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); + CNIC_WR16(dev, cp->kcq1.io_addr, + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); - if (BNX2X_CHIP_IS_E2(cp->chip_id)) { - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); + if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, + status_idx, IGU_INT_ENABLE, 1); + break; + } + + new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); + + if (new_status_idx != status_idx) + continue; CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + MAX_KCQ_IDX); cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, status_idx, IGU_INT_ENABLE, 1); - } else { - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, - status_idx, IGU_INT_ENABLE, 1); + + break; } } diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) { int i; - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Debugfs support is best effort. 
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) */ static void cleanup_debugfs(struct adapter *adapter) { - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Unlike our sister routine cleanup_proc(), we don't need to remove @@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, struct net_device *netdev; /* - * Vet our module parameters. - */ - if (msi != MSI_MSIX && msi != MSI_MSI) { - dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" - " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, - MSI_MSI); - err = -EINVAL; - goto err_out; - } - - /* * Print our driver banner the first time we're called to initialize a * device. */ @@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, /* * Set up our debugfs entries. */ - if (cxgb4vf_debugfs_root) { + if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), cxgb4vf_debugfs_root); - if (adapter->debugfs_root == NULL) + if (IS_ERR_OR_NULL(adapter->debugfs_root)) dev_warn(&pdev->dev, "could not create debugfs" " directory"); else @@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, */ err_free_debugfs: - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2802,7 +2791,6 @@ err_release_regions: err_disable_device: pci_disable_device(pdev); -err_out: return err; } @@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) /* * Tear down our debugfs entries. */ - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) } /* + * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt + * delivery. + */ +static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) +{ + struct adapter *adapter; + int pidx; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + /* + * Disable all Virtual Interfaces. This will shut down the + * delivery of all ingress packets into the chip for these + * Virtual Interfaces. + */ + for_each_port(adapter, pidx) { + struct net_device *netdev; + struct port_info *pi; + + if (!test_bit(pidx, &adapter->registered_device_map)) + continue; + + netdev = adapter->port[pidx]; + if (!netdev) + continue; + + pi = netdev_priv(netdev); + t4vf_enable_vi(adapter, pi->viid, false, false); + } + + /* + * Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. + */ + t4vf_free_sge_resources(adapter); +} + +/* * PCI Device registration data structures. */ #define CH_DEVICE(devid, idx) \ @@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { .id_table = cxgb4vf_pci_tbl, .probe = cxgb4vf_pci_probe, .remove = __devexit_p(cxgb4vf_pci_remove), + .shutdown = __devexit_p(cxgb4vf_pci_shutdown), }; /* @@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) { int ret; + /* + * Vet our module parameters. 
+ */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + printk(KERN_WARNING KBUILD_MODNAME + ": bad module parameter msi=%d; must be %d" + " (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); + return -EINVAL; + } + /* Debugfs support is optional, just warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cxgb4vf_debugfs_root) + if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) printk(KERN_WARNING KBUILD_MODNAME ": could not create" " debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); - if (ret < 0) + if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) debugfs_remove(cxgb4vf_debugfs_root); return ret; } diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, delay_idx = 0; ms = delay[0]; - for (i = 0; i < 500; i += ms) { + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { if (sleep_ok) { ms = delay[delay_idx]; if (delay_idx < ARRAY_SIZE(delay) - 1) diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) int ret; /* free and bail if we are shutting down */ - if (unlikely(!netif_running(ndev))) { + if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { dev_kfree_skb_any(skb); return; } diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Checksum mode */ dm9000_set_rx_csum_unlocked(dev, db->rx_csum); - /* GPIO0 on pre-activate PHY */ - iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ - iow(db, DM9000_GPR, 0); /* Enable PHY */ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? 
NCR_EXT_PHY : 0; @@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) unsigned long flags; /* Save previous register address */ - reg_save = readb(db->io_addr); spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); netif_stop_queue(dev); dm9000_reset(db); @@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) return -EAGAIN; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ + iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ + mdelay(1); /* delay needs by DM9000B */ + /* Initialize DM9000 board */ dm9000_reset(db); dm9000_init_dm9000(dev); diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(bp->dev, bp->mii_bus); - if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out_free_mdio_irq; @@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) bp = netdev_priv(dev); bp->dev = dev; + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); spin_lock_init(&bp->lock); diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 55c1711f1688..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h @@ -42,7 +42,8 @@ #define GBE_CONFIG_RAM_BASE \ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) -#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ (iowrite16_rep(base + offset, data, count)) diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3065870cf2a7..2e5022849f18 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) u16 phy_status, phy_1000t_status, phy_ext_status; u16 pci_status; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, downshift_task); + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); } @@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) return 0; } +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + void e1000e_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); @@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) { struct 
e1000_adapter *adapter = container_of(work, struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000_get_phy_info(&adapter->hw); } @@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) static void e1000_update_phy_info(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + schedule_work(&adapter->update_phy_task); } @@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) u32 link, tctl; int tx_pending = 0; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { /* Cancel scheduled suspend requests. */ @@ -4337,19 +4372,12 @@ link_up: else ew32(ICS, E1000_ICS_RXDMT0); + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; - /* flush partial descriptors to memory before detecting Tx hang */ - if (adapter->flags2 & FLAG2_DMA_BURST) { - ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); - ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); - /* - * no need to flush the writes because the timeout code does - * an er32 first thing - */ - } - /* * With 82571 controllers, LAA may be overwritten due to controller * reset from the other port. Set the appropriate LAA in RAR[0] @@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && (adapter->flags & FLAG_RX_RESTART_NOW))) { e1000e_dump(adapter); @@ -5935,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = er32(WUC); eeprom_apme_mask = E1000_WUC_APME; - if (eeprom_data & E1000_WUC_PHY_WAKE) + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2a71373719ae..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, - } + }, + { } }; static unsigned char macaddr[ETH_ALEN]; diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } + netif_carrier_off(dev); + dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c @@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * The parameter rar_count will usually be hw->mac.rar_entry_count * unless there are workarounds that change this. 
**/ -void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, u32 rar_used_count, u32 rar_count) { diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 8753980668c7..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; - static const unsigned int bufflen = 4096; + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; unsigned int firstoff = 0; unsigned int lastsize; unsigned int thisoff = 0; @@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough user buffers. We need an extra " + "buffer because lastsize is bufflen.\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); @@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) e_err(drv, "failed to allocated FCoE DDP pool\n"); spin_lock_init(&fcoe->lock); + + /* Extra buffer to be shared by all DDPs for HW work around */ + fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (fcoe->extra_ddp_buffer == NULL) { + e_err(drv, "failed to allocated extra DDP buffer\n"); + goto out_extra_ddp_buffer_alloc; + } + + fcoe->extra_ddp_buffer_dma = + dma_map_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + goto out_extra_ddp_buffer_dma; + } } /* Enable L2 eth type filter for FCoE */ @@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } } #endif + + return; + +out_extra_ddp_buffer_dma: + kfree(fcoe->extra_ddp_buffer); +out_extra_ddp_buffer_alloc: + pci_pool_destroy(fcoe->pool); + fcoe->pool = NULL; } /** @@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) if (fcoe->pool) { for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); pci_pool_destroy(fcoe->pool); fcoe->pool = NULL; } diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -70,6 +70,8 @@ struct ixgbe_fcoe { spinlock_t lock; struct pci_pool *pool; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + unsigned char *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; }; #endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fbae703b46d7..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct 
ixgbe_adapter *adapter) * We need to try and force an autonegotiation * session, then bring up link. */ - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) schedule_work(&adapter->multispeed_fiber_task); } else { @@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) unregister_netdev(adapter->netdev); return; } - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) /* This will also work for DA Twinax connections */ diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(bp->dev, bp->mii_bus); + dev_set_drvdata(&bp->dev->dev, bp->mii_bus); if (mdiobus_register(bp->mii_bus)) goto err_out_free_mdio_irq; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5933621ac3ff..fc27a9926d9e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, vnet_hdr_len = q->vnet_hdr_sz; err = -EINVAL; - if ((len -= vnet_hdr_len) < 0) + if (len < vnet_hdr_len) goto err; + len -= vnet_hdr_len; err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, sizeof(vnet_hdr)); diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -73,7 +73,7 @@ struct pch_gbe_regs { struct pch_gbe_regs_mac_adr mac_adr[16]; u32 ADDR_MASK; u32 MIIM; - u32 reserve2; + u32 MAC_ADDR_LOAD; u32 RGMII_ST; u32 RGMII_CTRL; u32 reserve3[3]; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 4c9a7d4f3fca..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_SHORT_PKT 64 #define DSC_INIT16 0xC000 #define PCH_GBE_DMA_ALIGN 0 +#define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 @@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); + +inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +{ + iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); +} + /** * pch_gbe_mac_read_mac_addr - Read MAC address * @hw: Pointer to the HW structure @@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info; struct pch_gbe_rx_desc *rx_desc; u32 length; - unsigned char tmp_packet[ETH_HLEN]; unsigned int i; unsigned int cleaned_count = 0; bool cleaned = false; - struct sk_buff *skb; + struct sk_buff *skb, *new_skb; u8 dma_status; u16 gbec_status; u32 tcp_ip_status; - u8 skb_copy_flag = 0; - u8 skb_padding_flag = 0; i = rx_ring->next_to_clean; @@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, pr_err("Receive CRC Error\n"); } else { /* get receive length */ - /* length convert[-3], padding[-2] */ - length = (rx_desc->rx_words_eob) - 3 - 2; + /* length convert[-3] */ + length = 
(rx_desc->rx_words_eob) - 3; /* Decide the data conversion method */ if (!adapter->rx_csum) { /* [Header:14][payload] */ - skb_padding_flag = 0; - skb_copy_flag = 1; + if (NET_IP_ALIGN) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; + } + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(new_skb->data, skb->data, + length); + skb = new_skb; + } else { + /* DMA buffer is used as SKB as it is.*/ + buffer_info->skb = NULL; + } } else { /* [Header:14][padding:2][payload] */ - skb_padding_flag = 1; - if (length < copybreak) - skb_copy_flag = 1; - else - skb_copy_flag = 0; - } - - /* Data conversion */ - if (skb_copy_flag) { /* recycle skb */ - struct sk_buff *new_skb; - new_skb = - netdev_alloc_skb(netdev, - length + NET_IP_ALIGN); - if (new_skb) { - if (!skb_padding_flag) { - skb_reserve(new_skb, - NET_IP_ALIGN); + /* The length includes padding length */ + length = length - PCH_GBE_DMA_PADDING; + if ((length < copybreak) || + (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb. + * Padding data is deleted + * at the time of a copy.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; } + skb_reserve(new_skb, NET_IP_ALIGN); memcpy(new_skb->data, skb->data, - length); - /* save the skb - * in buffer_info as good */ + ETH_HLEN); + memcpy(&new_skb->data[ETH_HLEN], + &skb->data[ETH_HLEN + + PCH_GBE_DMA_PADDING], + length - ETH_HLEN); skb = new_skb; - } else if (!skb_padding_flag) { - /* dorrop error */ - pr_err("New skb allocation Error\n"); - goto dorrop; + } else { + /* Padding data is deleted + * by moving header data.*/ + memmove(&skb->data[PCH_GBE_DMA_PADDING], + &skb->data[0], ETH_HLEN); + skb_reserve(skb, NET_IP_ALIGN); + buffer_info->skb = NULL; } - } else { - buffer_info->skb = NULL; } - if (skb_padding_flag) { - memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); - memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], - ETH_HLEN); - skb_reserve(skb, NET_IP_ALIGN); - - } - + /* The length includes FCS length */ + length = length - ETH_FCS_LEN; /* update status of driver */ adapter->stats.rx_bytes += length; adapter->stats.rx_packets++; @@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; pch_gbe_set_ethtool_ops(netdev); + pch_gbe_mac_load_mac_addr(&adapter->hw); pch_gbe_mac_reset_hw(&adapter->hw); /* setup the private structure */ diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), PCMCIA_DEVICE_NULL, }; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 27e6f6d43cac..e3ebd90ae651 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -49,8 +49,8 @@ #include <asm/processor.h> #define DRV_NAME "r6040" -#define DRV_VERSION "0.26" -#define 
DRV_RELDATE "30May2010" +#define DRV_VERSION "0.27" +#define DRV_RELDATE "23Feb2011" /* PHY CHIP Address */ #define PHY1_ADDR 1 /* For MAC1 */ @@ -69,6 +69,8 @@ /* MAC registers */ #define MCR0 0x00 /* Control register 0 */ +#define MCR0_PROMISC 0x0020 /* Promiscuous mode */ +#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ #define MCR1 0x04 /* Control register 1 */ #define MAC_RST 0x0001 /* Reset the MAC */ #define MBCR 0x08 /* Bus control */ @@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - u16 *adrp; - u16 reg; unsigned long flags; struct netdev_hw_addr *ha; int i; + u16 *adrp; + u16 hash_table[4] = { 0 }; + + spin_lock_irqsave(&lp->lock, flags); - /* MAC Address */ + /* Keep our MAC Address */ adrp = (u16 *)dev->dev_addr; iowrite16(adrp[0], ioaddr + MID_0L); iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); - /* Promiscous Mode */ - spin_lock_irqsave(&lp->lock, flags); - /* Clear AMCP & PROM bits */ - reg = ioread16(ioaddr) & ~0x0120; - if (dev->flags & IFF_PROMISC) { - reg |= 0x0020; - lp->mcr0 |= 0x0020; - } - /* Too many multicast addresses - * accept all traffic */ - else if ((netdev_mc_count(dev) > MCAST_MAX) || - (dev->flags & IFF_ALLMULTI)) - reg |= 0x0020; + lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN); - iowrite16(reg, ioaddr); - spin_unlock_irqrestore(&lp->lock, flags); + /* Promiscuous mode */ + if (dev->flags & IFF_PROMISC) + lp->mcr0 |= MCR0_PROMISC; - /* Build the hash table */ - if (netdev_mc_count(dev) > MCAST_MAX) { - u16 hash_table[4]; - u32 crc; + /* Enable multicast hash table function to + * receive all multicast packets. */ + else if (dev->flags & IFF_ALLMULTI) { + lp->mcr0 |= MCR0_HASH_EN; - for (i = 0; i < 4; i++) - hash_table[i] = 0; + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } + for (i = 0; i < 4; i++) + hash_table[i] = 0xffff; + } + /* Use internal multicast address registers if the number of + * multicast addresses is not greater than MCAST_MAX. */ + else if (netdev_mc_count(dev) <= MCAST_MAX) { + i = 0; netdev_for_each_mc_addr(ha, dev) { - char *addrs = ha->addr; + u16 *adrp = (u16 *) ha->addr; + iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); + iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); + iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); + i++; + } + while (i < MCAST_MAX) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + i++; + } + } + /* Otherwise, Enable multicast hash table function. 
*/ + else { + u32 crc; - if (!(*addrs & 1)) - continue; + lp->mcr0 |= MCR0_HASH_EN; + + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } - crc = ether_crc_le(6, addrs); + /* Build multicast hash table */ + netdev_for_each_mc_addr(ha, dev) { + u8 *addrs = ha->addr; + + crc = ether_crc(ETH_ALEN, addrs); crc >>= 26; - hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + hash_table[crc >> 4] |= 1 << (crc & 0xf); } - /* Fill the MAC hash tables with their values */ + } + + iowrite16(lp->mcr0, ioaddr + MCR0); + + /* Fill the MAC hash tables with their values */ + if (lp->mcr0 && MCR0_HASH_EN) { iowrite16(hash_table[0], ioaddr + MAR0); iowrite16(hash_table[1], ioaddr + MAR1); iowrite16(hash_table[2], ioaddr + MAR2); iowrite16(hash_table[3], ioaddr + MAR3); } - /* Multicast Address 1~4 case */ - i = 0; - netdev_for_each_mc_addr(ha, dev) { - if (i >= MCAST_MAX) - break; - adrp = (u16 *) ha->addr; - iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); - iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); - iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); - i++; - } - while (i < MCAST_MAX) { - iowrite16(0xffff, ioaddr + MID_1L + 8 * i); - iowrite16(0xffff, ioaddr + MID_1M + 8 * i); - iowrite16(0xffff, ioaddr + MID_1H + 8 * i); - i++; - } + + spin_unlock_irqrestore(&lp->lock, flags); } static void netdev_get_drvinfo(struct net_device *dev, diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c610..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -25,6 +25,7 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> +#include <linux/pci-aspm.h> #include <asm/system.h> #include <asm/io.h> @@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) } } -static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) { + void __iomem *ioaddr = tp->mmio_addr; int i; RTL_W8(ERIDR, cmd); @@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) break; } - ocp_write(ioaddr, 0x1, 0x30, 0x00000001); + ocp_write(tp, 0x1, 0x30, 0x00000001); } #define OOB_CMD_RESET 0x00 @@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || (tp->mac_version == RTL_GIGA_MAC_VER_24)) && @@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; } @@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; } @@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id 
*ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pci_enable_device(pdev); if (rc < 0) { @@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = PCIMulRW | RxChkSum; + tp->cp_cmd = RxChkSum; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { @@ -3190,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); + netif_carrier_off(dev); + out: return rc; @@ -3316,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) /* Disable interrupts */ rtl8169_irq_mask_and_ack(ioaddr); - if (tp->mac_version == RTL_GIGA_MAC_VER_28) { + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28) { while (RTL_R8(TxPoll) & NPQ) udelay(20); @@ -3845,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev) Cxpl_dbg_sel | \ ASF | \ PktCntrDisable | \ - PCIDAC | \ - PCIMulRW) + Mac_dbgo_sel) static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) { @@ -3876,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) RTL_W8(Config1, cfg1 & ~LEDS0); - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); - rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); } @@ -3889,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); } static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) @@ -3916,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev) } } + RTL_W8(Cfg9346, Cfg9346_Unlock); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: rtl_hw_start_8102e_1(ioaddr, pdev); @@ -3930,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev) break; } - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Cfg9346, Cfg9346_Lock); RTL_W8(MaxTxPacketSize, TxPacketMax); rtl_set_rx_max_size(ioaddr, rx_buf_sz); - tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; - + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; RTL_W16(CPlusCmd, tp->cp_cmd); RTL_W16(IntrMitigate, 0x0000); @@ -3947,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); rtl_set_rx_tx_config_registers(tp); - RTL_W8(Cfg9346, Cfg9346_Lock); - RTL_R8(IntrMask); rtl_set_rx_mode(dev); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); RTL_W16(IntrMask, tp->intr_event); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_self_tests efx_tests; + struct efx_self_tests *efx_tests; int already_up; - int rc; + int rc = -ENOMEM; + + efx_tests = 
kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { @@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); - goto fail2; + goto fail1; } } - memset(&efx_tests, 0, sizeof(efx_tests)); - - rc = efx_selftest(efx, &efx_tests, test->flags); + rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); @@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - fail2: - fail1: +fail1: /* Fill ethtool results structures */ - efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - /* device is off until link detection */ - netif_carrier_off(dev); - return dev; } diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 64bfdae5956f..d70bde95460b 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev) smsc911x_reg_write(pdata, HW_CFG, 0x00050000); smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ timeout = 50; while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) priv->hw = device; - if (device_can_wakeup(priv->device)) + if (device_can_wakeup(priv->device)) { priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + enable_irq_wake(dev->irq); + } return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ 
b/drivers/net/usb/dm9601.c @@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { .driver_info = (unsigned long)&dm9601_info, }, { + USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ .driver_info = (unsigned long)&dm9601_info, }, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2628,15 +2628,15 @@ exit: static void hso_free_tiomget(struct hso_serial *serial) { - struct hso_tiocmget *tiocmget = serial->tiocmget; + struct hso_tiocmget *tiocmget; + if (!serial) + return; + tiocmget = serial->tiocmget; if (tiocmget) { - if (tiocmget->urb) { - usb_free_urb(tiocmget->urb); - tiocmget->urb = NULL; - } + usb_free_urb(tiocmget->urb); + tiocmget->urb = NULL; serial->tiocmget = NULL; kfree(tiocmget); - } } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -931,8 +931,10 @@ fail_halt: if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); - if (status < 0) + if (status < 0) { + usb_free_urb(urb); goto fail_lowmem; + } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) return 0; } +/* + * Wait for synth to settle + */ +static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + /* + * On 5211+ read activation -> rx delay + * and use it (100ns steps). + */ + if (ah->ah_version != AR5K_AR5210) { + u32 delay; + delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & + AR5K_PHY_RX_DELAY_M; + delay = (channel->hw_value & CHANNEL_CCK) ? + ((delay << 2) / 22) : (delay / 10); + if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) + delay = delay << 1; + if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) + delay = delay << 2; + /* XXX: /2 on turbo ? 
Let's be safe + * for now */ + udelay(100 + delay); + } else { + mdelay(1); + } +} + /**********************\ * RF Gain optimization * @@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, case AR5K_RF5111: ret = ath5k_hw_rf5111_channel(ah, channel); break; + case AR5K_RF2317: case AR5K_RF2425: ret = ath5k_hw_rf2425_channel(ah, channel); break; @@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, /* Failed */ if (i >= 100) return -EIO; + + /* Set channel and wait for synth */ + ret = ath5k_hw_channel(ah, channel); + if (ret) + return ret; + + ath5k_hw_wait_for_synth(ah, channel); } /* @@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; + /* Write OFDM timings on 5212*/ + if (ah->ah_version == AR5K_AR5212 && + channel->hw_value & CHANNEL_OFDM) { + + ret = ath5k_hw_write_ofdm_timings(ah, channel); + if (ret) + return ret; + + /* Spur info is available only from EEPROM versions + * greater than 5.3, but the EEPROM routines will use + * static values for older versions */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424) + ath5k_hw_set_spur_mitigation_filter(ah, + channel); + } + + /* If we used fast channel switching + * we are done, release RF bus and + * fire up NF calibration. + * + * Note: Only NF calibration due to + * channel change, not AGC calibration + * since AGC is still running ! + */ + if (fast) { + /* + * Release RF Bus grant + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, + AR5K_PHY_RFBUS_REQ_REQUEST); + + /* + * Start NF calibration + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_NF); + + return ret; + } + /* * For 5210 we do all initialization using * initvals, so we don't have to modify * any settings (5210 also only supports * a/aturbo modes) */ - if ((ah->ah_version != AR5K_AR5210) && !fast) { + if (ah->ah_version != AR5K_AR5210) { /* * Write initial RF gain settings @@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; - /* Write OFDM timings on 5212*/ - if (ah->ah_version == AR5K_AR5212 && - channel->hw_value & CHANNEL_OFDM) { - - ret = ath5k_hw_write_ofdm_timings(ah, channel); - if (ret) - return ret; - - /* Spur info is available only from EEPROM versions - * greater than 5.3, but the EEPROM routines will use - * static values for older versions */ - if (ah->ah_mac_srev >= AR5K_SREV_AR5424) - ath5k_hw_set_spur_mitigation_filter(ah, - channel); - } - /*Enable/disable 802.11b mode on 5111 (enable 2111 frequency converter + CCK)*/ if (ah->ah_radio == AR5K_RF5111) { @@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); + ath5k_hw_wait_for_synth(ah, channel); + /* - * On 5211+ read activation -> rx delay - * and use it. + * Perform ADC test to see if baseband is ready + * Set tx hold and check adc test register */ - if (ah->ah_version != AR5K_AR5210) { - u32 delay; - delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & - AR5K_PHY_RX_DELAY_M; - delay = (channel->hw_value & CHANNEL_CCK) ? - ((delay << 2) / 22) : (delay / 10); - if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) - delay = delay << 1; - if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) - delay = delay << 2; - /* XXX: /2 on turbo ? 
Let's be safe - * for now */ - udelay(100 + delay); - } else { - mdelay(1); - } - - if (fast) - /* - * Release RF Bus grant - */ - AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, - AR5K_PHY_RFBUS_REQ_REQUEST); - else { - /* - * Perform ADC test to see if baseband is ready - * Set tx hold and check adc test register - */ - phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); - ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); - for (i = 0; i <= 20; i++) { - if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) - break; - udelay(200); - } - ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); + phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); + ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); + for (i = 0; i <= 20; i++) { + if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) + break; + udelay(200); } + ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); /* * Start automatic gain control calibration diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 23838e37d45f..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -21,7 +21,6 @@ #include <linux/device.h> #include <linux/leds.h> #include <linux/completion.h> -#include <linux/pm_qos_params.h> #include "debug.h" #include "common.h" @@ -57,8 +56,6 @@ struct ath_node; #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define ATH9K_PM_QOS_DEFAULT_VALUE 55 - #define TSF_TO_TU(_h,_l) \ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) @@ -633,8 +630,6 @@ struct ath_softc { struct ath_descdma txsdma; struct ath_ant_comb ant_comb; - - struct pm_qos_request_list pm_qos_req; }; struct ath_wiphy { @@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) extern struct ieee80211_ops ath9k_ops; extern int ath9k_modparam_nohwcrypt; extern int led_blink; -extern int ath9k_pm_qos_value; extern bool is_ath9k_unloaded; irqreturn_t ath_isr(int irq, void *dev); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) struct tx_buf *tx_buf = NULL; struct sk_buff *nskb = NULL; int ret = 0, i; - u16 *hdr, tx_skb_cnt = 0; + u16 tx_skb_cnt = 0; u8 *buf; + __le16 *hdr; if (hif_dev->tx.tx_skb_cnt == 0) return 0; @@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) buf = tx_buf->buf; buf += tx_buf->offset; - hdr = (u16 *)buf; - *hdr++ = nskb->len; - *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; + hdr = (__le16 *)buf; + *hdr++ = cpu_to_le16(nskb->len); + *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); buf += 4; memcpy(buf, nskb->data, nskb->len); tx_buf->len = nskb->len + 4; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 087a6a95edd5..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); -int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; -module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); -MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); - bool is_ath9k_unloaded; /* We use the hw_value as an index into our private channel structure */ @@ -762,9 
+758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, ath_init_leds(sc); ath_start_rfkill_poll(sc); - pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - return 0; error_world: @@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) } ieee80211_unregister_hw(hw); - pm_qos_remove_request(&sc->pm_qos_req); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) struct ath_common *common = ath9k_hw_common(ah); if (!(ints & ATH9K_INT_GLOBAL)) - ath9k_hw_enable_interrupts(ah); + ath9k_hw_disable_interrupts(ah); ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); @@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); } - ath9k_hw_enable_interrupts(ah); + if (ints & ATH9K_INT_GLOBAL) + ath9k_hw_enable_interrupts(ah); return; } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index da5c64597c1f..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) ath9k_btcoex_timer_resume(sc); } - /* User has the option to provide pm-qos value as a module - * parameter rather than using the default value of - * 'ATH9K_PM_QOS_DEFAULT_VALUE'. - */ - pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); - if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) common->bus_ops->extn_synch_en(common); @@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) sc->sc_flags |= SC_OP_INVALID; - pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); - mutex_unlock(&sc->mutex); ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { { USB_DEVICE(0x057c, 0x8402) }, /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ { USB_DEVICE(0x1668, 0x1200) }, + /* Airlive X.USB a/b/g/n */ + { USB_DEVICE(0x1b75, 0x9170) }, /* terminate */ {} diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, } #endif -/** - * iwl3945_good_plcp_health - checks for plcp error. - * - * When the plcp error is exceeding the thresholds, reset the radio - * to improve the throughput. 
- */ -static bool iwl3945_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - bool rc = true; - struct iwl3945_notif_statistics current_stat; - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; - - if (priv->cfg->base_params->plcp_delta_threshold == - IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { - IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); - return rc; - } - memcpy(¤t_stat, pkt->u.raw, sizeof(struct - iwl3945_notif_statistics)); - /* - * check for plcp_err and trigger radio reset if it exceeds - * the plcp error threshold plcp_delta. - */ - plcp_received_jiffies = jiffies; - plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - - (long) priv->plcp_jiffies); - priv->plcp_jiffies = plcp_received_jiffies; - /* - * check to make sure plcp_msec is not 0 to prevent division - * by zero. - */ - if (plcp_msec) { - combined_plcp_delta = - (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - - le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); - - if ((combined_plcp_delta > 0) && - ((combined_plcp_delta * 100) / plcp_msec) > - priv->cfg->base_params->plcp_delta_threshold) { - /* - * if plcp_err exceed the threshold, the following - * data is printed in csv format: - * Text: plcp_err exceeded %d, - * Received ofdm.plcp_err, - * Current ofdm.plcp_err, - * combined_plcp_delta, - * plcp_msec - */ - IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " - "%u, %d, %u mSecs\n", - priv->cfg->base_params->plcp_delta_threshold, - le32_to_cpu(current_stat.rx.ofdm.plcp_err), - combined_plcp_delta, plcp_msec); - /* - * Reset the RF radio due to the high plcp - * error rate - */ - rc = false; - } - } - return rc; -} - void iwl3945_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { @@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { .isr_ops = { .isr = iwl_isr_legacy, }, - .check_plcp_health = iwl3945_good_plcp_health, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -51,7 +51,7 @@ #include "iwl-agn-debugfs.h" /* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 2 +#define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 /* Lowest firmware API version supported */ diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, while (i != idx) { u16 len; struct sk_buff *skb; + dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } + dma_addr = le32_to_cpu(desc->host_addr); + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, - le32_to_cpu(desc->host_addr), - priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; - desc->host_addr = 0; + desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); + pci_dma_sync_single_for_device(priv->pdev, dma_addr, + 
priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, __le32 mode; int ret; + if (priv->device_type != RNDIS_BCM4320B) + return -ENOTSUPP; + netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, enabled ? "enabled" : "disabled", timeout); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. + */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) @@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, #endif #ifdef CONFIG_RT2800PCI_RT35XX + { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. 
+ */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index ffedfd492754..ea1580085347 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -3,7 +3,7 @@ # menuconfig NFC_DEVICES - bool "NFC devices" + bool "Near Field Communication (NFC) devices" default n ---help--- You'll have to say Y if your computer contains an NFC device that diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c index bae647264dd6..724f65d8f9e4 100644 --- a/drivers/nfc/pn544.c +++ b/drivers/nfc/pn544.c @@ -60,7 +60,7 @@ enum pn544_irq { struct pn544_info { struct miscdevice miscdev; struct i2c_client *i2c_dev; - struct regulator_bulk_data regs[2]; + struct regulator_bulk_data regs[3]; enum pn544_state state; wait_queue_head_t read_wait; @@ -74,6 +74,7 @@ struct pn544_info { static const char reg_vdd_io[] = "Vdd_IO"; static const char reg_vbat[] = "VBat"; +static const char reg_vsim[] = "VSim"; /* sysfs interface */ static ssize_t pn544_test(struct device *dev, @@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client, info->regs[0].supply = reg_vdd_io; info->regs[1].supply = reg_vbat; + info->regs[2].supply = reg_vsim; r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), info->regs); if (r < 0) diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index 28295d0a50f6..4d87b5dc9284 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c @@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata; (p)->unique_id = of_pdt_unique_id++; \ } while (0) -static inline const char *of_pdt_node_name(struct device_node *dp) +static char * __init of_pdt_build_full_name(struct device_node *dp) { - return dp->path_component_name; + int len, ourlen, plen; + char *n; + + dp->path_component_name = build_path_component(dp); + + plen = strlen(dp->parent->full_name); + ourlen = strlen(dp->path_component_name); + len = ourlen + plen + 2; + + n = prom_early_alloc(len); + strcpy(n, dp->parent->full_name); + if (!of_node_is_root(dp->parent)) { + strcpy(n + plen, "/"); + plen++; + } + strcpy(n + plen, dp->path_component_name); + + return n; } -#else +#else /* CONFIG_SPARC */ static inline void of_pdt_incr_unique_id(void *p) { } static inline void irq_trans_init(struct device_node *dp) { } -static inline const char *of_pdt_node_name(struct device_node *dp) +static char * __init of_pdt_build_full_name(struct device_node *dp) { - return dp->name; + static int failsafe_id = 0; /* for generating unique names on failure */ + char *buf; + int len; + + if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len)) + goto failsafe; + + buf = prom_early_alloc(len + 1); + if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len)) + goto failsafe; + return buf; + + failsafe: + buf = prom_early_alloc(strlen(dp->parent->full_name) + + strlen(dp->name) + 16); + sprintf(buf, "%s/%s@unknown%i", + of_node_is_root(dp->parent) ? 
"" : dp->parent->full_name, + dp->name, failsafe_id++); + pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf); + return buf; } #endif /* !CONFIG_SPARC */ @@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name) return buf; } -static char * __init of_pdt_try_pkg2path(phandle node) -{ - char *res, *buf = NULL; - int len; - - if (!of_pdt_prom_ops->pkg2path) - return NULL; - - if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len)) - return NULL; - buf = prom_early_alloc(len + 1); - if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) { - pr_err("%s: package-to-path failed\n", __func__); - return NULL; - } - - res = strrchr(buf, '/'); - if (!res) { - pr_err("%s: couldn't find / in %s\n", __func__, buf); - return NULL; - } - return res+1; -} - -/* - * When fetching the node's name, first try using package-to-path; if - * that fails (either because the arch hasn't supplied a PROM callback, - * or some other random failure), fall back to just looking at the node's - * 'name' property. - */ -static char * __init of_pdt_build_name(phandle node) -{ - char *buf; - - buf = of_pdt_try_pkg2path(node); - if (!buf) - buf = of_pdt_get_one_property(node, "name"); - - return buf; -} - static struct device_node * __init of_pdt_create_node(phandle node, struct device_node *parent) { @@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, kref_init(&dp->kref); - dp->name = of_pdt_build_name(node); + dp->name = of_pdt_get_one_property(node, "name"); dp->type = of_pdt_get_one_property(node, "device_type"); dp->phandle = node; @@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, return dp; } -static char * __init of_pdt_build_full_name(struct device_node *dp) -{ - int len, ourlen, plen; - char *n; - - plen = strlen(dp->parent->full_name); - ourlen = strlen(of_pdt_node_name(dp)); - len = ourlen + plen + 2; - - n = prom_early_alloc(len); - strcpy(n, dp->parent->full_name); - if (!of_node_is_root(dp->parent)) { - strcpy(n + plen, "/"); - plen++; - } - strcpy(n + plen, of_pdt_node_name(dp)); - - return n; -} - static struct device_node * __init of_pdt_build_tree(struct device_node *parent, phandle node, struct device_node ***nextp) @@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, *(*nextp) = dp; *nextp = &dp->allnext; -#if defined(CONFIG_SPARC) - dp->path_component_name = build_path_component(dp); -#endif dp->full_name = of_pdt_build_full_name(dp); dp->child = of_pdt_build_tree(dp, diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 0bdda5b3ed55..42fbf1a75576 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev) flags |= CONF_ENABLE_IOCARD; if (flags & CONF_ENABLE_IOCARD) s->socket.flags |= SS_IOCARD; + if (flags & CONF_ENABLE_ZVCARD) + s->socket.flags |= SS_ZVCARD | SS_IOCARD; if (flags & CONF_ENABLE_SPKR) { s->socket.flags |= SS_SPKR_ENA; status = CCSR_AUDIO_ENA; diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 3755e7c8c715..2c540542b5af 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c @@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, } #endif -static void pxa2xx_configure_sockets(struct device *dev) +void pxa2xx_configure_sockets(struct device *dev) { struct pcmcia_low_level *ops = dev->platform_data; /* diff --git 
a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index bb62ea87b8f9..b609b45469ed 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h @@ -1,3 +1,4 @@ int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); +void pxa2xx_configure_sockets(struct device *dev); diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c index c3f72192af66..a52039564e74 100644 --- a/drivers/pcmcia/pxa2xx_colibri.c +++ b/drivers/pcmcia/pxa2xx_colibri.c @@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void) { int ret; + if (!machine_is_colibri() && !machine_is_colibri320()) + return -ENODEV; + colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!colibri_pcmcia_device) return -ENOMEM; diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c index b9f8c8fb42bd..25afe637c657 100644 --- a/drivers/pcmcia/pxa2xx_lubbock.c +++ b/drivers/pcmcia/pxa2xx_lubbock.c @@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev) lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); + pxa2xx_configure_sockets(&sadev->dev); ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, pxa2xx_drv_pcmcia_add_one); } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d163bc2e2b9e..a59af5b24f0a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -227,7 +227,7 @@ config SONYPI_COMPAT config IDEAPAD_LAPTOP tristate "Lenovo IdeaPad Laptop Extras" depends on ACPI - depends on RFKILL + depends on RFKILL && INPUT select INPUT_SPARSEKMAP help This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c5c4b8c32eb8..38b34a73866a 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -84,7 +84,7 @@ MODULE_LICENSE("GPL"); */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" -#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" +#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" @@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev, return -EINVAL; return count; } -static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, +static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 4633fd8532cc..fe495939c307 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device) struct proc_dir_entry *proc; mode_t mode; - /* - * If parameter uid or gid is not changed, keep the default setting for - * our proc entries (-rw-rw-rw-) else, it means we care about security, - * and then set to -rw-rw---- - */ - if ((asus_uid == 0) && (asus_gid == 0)) { - mode = S_IFREG | S_IRUGO | S_IWUGO; + mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; } else { mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; printk(KERN_WARNING " asus_uid and asus_gid parameters are " diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 
34657f96b5a5..ad24ef36f9f7 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked) dell_send_request(buffer, 17, 11); /* If the hardware switch controls this radio, and the hardware - switch is disabled, don't allow changing the software state */ + switch is disabled, don't allow changing the software state. + If the hardware switch is reported as not supported, always + fire the SMI to toggle the killswitch. */ if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) { + !(buffer->output[1] & BIT(16)) && + (buffer->output[1] & BIT(0))) { ret = -EINVAL; goto out; } @@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = { static void dell_update_rfkill(struct work_struct *ignored) { + int status; + + get_buffer(); + dell_send_request(buffer, 17, 11); + status = buffer->output[1]; + release_buffer(); + + /* if hardware rfkill is not supported, set it explicitly */ + if (!(status & BIT(0))) { + if (wifi_rfkill) + dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17)); + if (bluetooth_rfkill) + dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18)); + if (wwan_rfkill) + dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19)); + } + if (wifi_rfkill) dell_rfkill_query(wifi_rfkill, (void *)1); if (bluetooth_rfkill) diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 930e62762365..61433d492862 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c @@ -60,69 +60,20 @@ enum pmic_gpio_register { #define GPOSW_DOU 0x08 #define GPOSW_RDRV 0x30 +#define GPIO_UPDATE_TYPE 0x80000000 #define NUM_GPIO 24 -struct pmic_gpio_irq { - spinlock_t lock; - u32 trigger[NUM_GPIO]; - u32 dirty; - struct work_struct work; -}; - - struct pmic_gpio { + struct mutex buslock; struct gpio_chip chip; - struct pmic_gpio_irq irqtypes; void *gpiointr; int irq; unsigned irq_base; + unsigned int update_type; + u32 trigger_type; }; -static void pmic_program_irqtype(int gpio, int type) -{ - if (type & IRQ_TYPE_EDGE_RISING) - intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); - else - intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); - - if (type & IRQ_TYPE_EDGE_FALLING) - intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); - else - intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); -}; - -static void pmic_irqtype_work(struct work_struct *work) -{ - struct pmic_gpio_irq *t = - container_of(work, struct pmic_gpio_irq, work); - unsigned long flags; - int i; - u16 type; - - spin_lock_irqsave(&t->lock, flags); - /* As we drop the lock, we may need multiple scans if we race the - pmic_irq_type function */ - while (t->dirty) { - /* - * For each pin that has the dirty bit set send an IPC - * message to configure the hardware via the PMIC - */ - for (i = 0; i < NUM_GPIO; i++) { - if (!(t->dirty & (1 << i))) - continue; - t->dirty &= ~(1 << i); - /* We can't trust the array entry or dirty - once the lock is dropped */ - type = t->trigger[i]; - spin_unlock_irqrestore(&t->lock, flags); - pmic_program_irqtype(i, type); - spin_lock_irqsave(&t->lock, flags); - } - } - spin_unlock_irqrestore(&t->lock, flags); -} - static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { if (offset > 8) { @@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 1 << (offset - 16)); } -static int pmic_irq_type(unsigned irq, unsigned type) 
+/* + * This is called from genirq with pg->buslock locked and + * irq_desc->lock held. We can not access the scu bus here, so we + * store the change and update in the bus_sync_unlock() function below + */ +static int pmic_irq_type(struct irq_data *data, unsigned type) { - struct pmic_gpio *pg = get_irq_chip_data(irq); - u32 gpio = irq - pg->irq_base; - unsigned long flags; + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + u32 gpio = data->irq - pg->irq_base; if (gpio >= pg->chip.ngpio) return -EINVAL; - spin_lock_irqsave(&pg->irqtypes.lock, flags); - pg->irqtypes.trigger[gpio] = type; - pg->irqtypes.dirty |= (1 << gpio); - spin_unlock_irqrestore(&pg->irqtypes.lock, flags); - schedule_work(&pg->irqtypes.work); + pg->trigger_type = type; + pg->update_type = gpio | GPIO_UPDATE_TYPE; return 0; } - - static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); @@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) } /* the gpiointr register is read-clear, so just do nothing. */ -static void pmic_irq_unmask(unsigned irq) -{ -}; +static void pmic_irq_unmask(struct irq_data *data) { } -static void pmic_irq_mask(unsigned irq) -{ -}; +static void pmic_irq_mask(struct irq_data *data) { } static struct irq_chip pmic_irqchip = { .name = "PMIC-GPIO", - .mask = pmic_irq_mask, - .unmask = pmic_irq_unmask, - .set_type = pmic_irq_type, + .irq_mask = pmic_irq_mask, + .irq_unmask = pmic_irq_unmask, + .irq_set_type = pmic_irq_type, }; -static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) +static irqreturn_t pmic_irq_handler(int irq, void *data) { - struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); + struct pmic_gpio *pg = data; u8 intsts = *((u8 *)pg->gpiointr + 4); int gpio; + irqreturn_t ret = IRQ_NONE; for (gpio = 0; gpio < 8; gpio++) { if (intsts & (1 << gpio)) { pr_debug("pmic pin %d triggered\n", gpio); generic_handle_irq(pg->irq_base + gpio); + ret = IRQ_HANDLED; } } - - if (desc->chip->irq_eoi) - desc->chip->irq_eoi(irq_get_irq_data(irq)); - else - dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq); + return ret; } static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) @@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) pg->chip.can_sleep = 1; pg->chip.dev = dev; - INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); - spin_lock_init(&pg->irqtypes.lock); + mutex_init(&pg->buslock); pg->chip.dev = dev; retval = gpiochip_add(&pg->chip); @@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); goto err; } - set_irq_data(pg->irq, pg); - set_irq_chained_handler(pg->irq, pmic_irq_handler); + + retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg); + if (retval) { + printk(KERN_WARNING "pmic: Interrupt request failed\n"); + goto err; + } + for (i = 0; i < 8; i++) { set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, handle_simple_irq, "demux"); diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1feff71..865ef78d6f1a 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c @@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ return -EINVAL; \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ +static DEVICE_ATTR(value, S_IRUGO | 
S_IWUSR, \ show_bool_##value, set_bool_##value); show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index dd599585c6a9..eb9922385ef8 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode) if (keycode != KEY_RESERVED) { mutex_lock(&tpacpi_inputdev_send_mutex); + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); input_report_key(tpacpi_inputdev, keycode, 1); - if (keycode == KEY_UNKNOWN) - input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, - scancode); input_sync(tpacpi_inputdev); + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); input_report_key(tpacpi_inputdev, keycode, 0); - if (keycode == KEY_UNKNOWN) - input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, - scancode); input_sync(tpacpi_inputdev); mutex_unlock(&tpacpi_inputdev_send_mutex); diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig index f3a73dd77660..e4c4f3dc0728 100644 --- a/drivers/pps/generators/Kconfig +++ b/drivers/pps/generators/Kconfig @@ -6,7 +6,7 @@ comment "PPS generators support" config PPS_GENERATOR_PARPORT tristate "Parallel port PPS signal generator" - depends on PARPORT + depends on PARPORT && BROKEN help If you say yes here you get support for a PPS signal generator which utilizes STROBE pin of a parallel port to send PPS signals. It uses diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index cba1b43f7519..a4e8eb9fece6 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, { unsigned long flags; int captured = 0; - struct pps_ktime ts_real; + struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 }; /* check event type */ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 76b41853a877..1269fbd2deca 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj, /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) - size = 0x200000; + size = RIO_MAINT_SPACE_SZ; - if (off > size) + if (off >= size) return 0; if (off + count > size) { size -= off; @@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj, loff_t init_off = off; u8 *data = (u8 *) buf; - if (off > 0x200000) + if (off >= RIO_MAINT_SPACE_SZ) return 0; - if (off + count > 0x200000) { - size = 0x200000 - off; + if (off + count > RIO_MAINT_SPACE_SZ) { + size = RIO_MAINT_SPACE_SZ - off; count = size; } @@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 0x200000, + .size = RIO_MAINT_SPACE_SZ, .read = rio_read_config, .write = rio_write_config, }; diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index f53d31b950d4..2bb5de1f2421 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c @@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); - BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); + BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); return mc13xxx_regulators[id].voltages[val]; 
} diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 8b0d2c4bde91..06df898842c0 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_IDLE; default: BUG(); + return -EINVAL; } } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cdd97192dc69..4941cade319f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -97,6 +97,18 @@ config RTC_INTF_DEV If unsure, say Y. +config RTC_INTF_DEV_UIE_EMUL + bool "RTC UIE emulation on dev interface" + depends on RTC_INTF_DEV + help + Provides an emulation for RTC_UIE if the underlying rtc chip + driver does not expose RTC_UIE ioctls. Those requests generate + once-per-second update interrupts, used for synchronization. + + The emulation code will read the time from the hardware + clock several times per second, please enable this option + only if you know that you really need it. + config RTC_DRV_TEST tristate "Test driver/device" help diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a0c01967244d..cb2f0728fd70 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) } if (err) - return err; - - if (!rtc->ops) + /* nothing */; + else if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->alarm_irq_enable) err = -EINVAL; @@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) if (err) return err; +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + if (enabled == 0 && rtc->uie_irq_active) { + mutex_unlock(&rtc->ops_lock); + return rtc_dev_update_irq_enable_emul(rtc, 0); + } +#endif /* make sure we're changing state */ if (rtc->uie_rtctimer.enabled == enabled) goto out; @@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) out: mutex_unlock(&rtc->ops_lock); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + /* + * Enable emulation if the driver did not provide + * the update_irq_enable function pointer or if returned + * -EINVAL to signal that it has been configured without + * interrupts or that are not available at the moment. + */ + if (err == -EINVAL) + err = rtc_dev_update_irq_enable_emul(rtc, enabled); +#endif return err; } @@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable); * * Triggers the registered irq_task function callback. 
*/ -static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) { unsigned long flags; diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index c36749e4c926..5469c52cba3d 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c @@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = { .read_alarm = at91_rtc_readalarm, .set_alarm = at91_rtc_setalarm, .proc = at91_rtc_proc, - .alarm_irq_enabled = at91_rtc_alarm_irq_enable, + .alarm_irq_enable = at91_rtc_alarm_irq_enable, }; /* diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 37c3cc1b3dd5..d0e06edb14c5 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file) return err; } +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL +/* + * Routine to poll RTC seconds field for change as often as possible, + * after first RTC_UIE use timer to reduce polling + */ +static void rtc_uie_task(struct work_struct *work) +{ + struct rtc_device *rtc = + container_of(work, struct rtc_device, uie_task); + struct rtc_time tm; + int num = 0; + int err; + + err = rtc_read_time(rtc, &tm); + + spin_lock_irq(&rtc->irq_lock); + if (rtc->stop_uie_polling || err) { + rtc->uie_task_active = 0; + } else if (rtc->oldsecs != tm.tm_sec) { + num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; + rtc->oldsecs = tm.tm_sec; + rtc->uie_timer.expires = jiffies + HZ - (HZ/10); + rtc->uie_timer_active = 1; + rtc->uie_task_active = 0; + add_timer(&rtc->uie_timer); + } else if (schedule_work(&rtc->uie_task) == 0) { + rtc->uie_task_active = 0; + } + spin_unlock_irq(&rtc->irq_lock); + if (num) + rtc_handle_legacy_irq(rtc, num, RTC_UF); +} +static void rtc_uie_timer(unsigned long data) +{ + struct rtc_device *rtc = (struct rtc_device *)data; + unsigned long flags; + + spin_lock_irqsave(&rtc->irq_lock, flags); + rtc->uie_timer_active = 0; + rtc->uie_task_active = 1; + if ((schedule_work(&rtc->uie_task) == 0)) + rtc->uie_task_active = 0; + spin_unlock_irqrestore(&rtc->irq_lock, flags); +} + +static int clear_uie(struct rtc_device *rtc) +{ + spin_lock_irq(&rtc->irq_lock); + if (rtc->uie_irq_active) { + rtc->stop_uie_polling = 1; + if (rtc->uie_timer_active) { + spin_unlock_irq(&rtc->irq_lock); + del_timer_sync(&rtc->uie_timer); + spin_lock_irq(&rtc->irq_lock); + rtc->uie_timer_active = 0; + } + if (rtc->uie_task_active) { + spin_unlock_irq(&rtc->irq_lock); + flush_scheduled_work(); + spin_lock_irq(&rtc->irq_lock); + } + rtc->uie_irq_active = 0; + } + spin_unlock_irq(&rtc->irq_lock); + return 0; +} + +static int set_uie(struct rtc_device *rtc) +{ + struct rtc_time tm; + int err; + + err = rtc_read_time(rtc, &tm); + if (err) + return err; + spin_lock_irq(&rtc->irq_lock); + if (!rtc->uie_irq_active) { + rtc->uie_irq_active = 1; + rtc->stop_uie_polling = 0; + rtc->oldsecs = tm.tm_sec; + rtc->uie_task_active = 1; + if (schedule_work(&rtc->uie_task) == 0) + rtc->uie_task_active = 0; + } + rtc->irq_data = 0; + spin_unlock_irq(&rtc->irq_lock); + return 0; +} + +int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) +{ + if (enabled) + return set_uie(rtc); + else + return clear_uie(rtc); +} +EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); + +#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ static ssize_t rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc) rtc->dev.devt = 
MKDEV(MAJOR(rtc_devt), rtc->id); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + INIT_WORK(&rtc->uie_task, rtc_uie_task); + setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); +#endif + cdev_init(&rtc->char_dev, &rtc_dev_fops); rtc->char_dev.owner = rtc->owner; } diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 23a9ee19764c..950735415a7c 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -1,7 +1,7 @@ /* * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C * - * Copyright (C) 2009-2010 Freescale Semiconductor. + * Copyright (C) 2009-2011 Freescale Semiconductor. * Author: Jack Lan <jack.lan@freescale.com> * * This program is free software; you can redistribute it and/or modify it @@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time) time->tm_hour = bcd2bin(hour); } - time->tm_wday = bcd2bin(week); + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ + time->tm_wday = bcd2bin(week) - 1; time->tm_mday = bcd2bin(day); - time->tm_mon = bcd2bin(month & 0x7F); + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ + time->tm_mon = bcd2bin(month & 0x7F) - 1; if (century) add_century = 100; @@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time) buf[0] = bin2bcd(time->tm_sec); buf[1] = bin2bcd(time->tm_min); buf[2] = bin2bcd(time->tm_hour); - buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ + buf[3] = bin2bcd(time->tm_wday + 1); buf[4] = bin2bcd(time->tm_mday); /* Date */ - buf[5] = bin2bcd(time->tm_mon); + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ + buf[5] = bin2bcd(time->tm_mon + 1); if (time->tm_year >= 100) { buf[5] |= 0x80; buf[6] = bin2bcd(time->tm_year - 100); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index cf953ecbfca9..b80fa2882408 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id) } /* Update control registers */ -static void s3c_rtc_setaie(int to) +static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) { unsigned int tmp; - pr_debug("%s: aie=%d\n", __func__, to); + pr_debug("%s: aie=%d\n", __func__, enabled); tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; - if (to) + if (enabled) tmp |= S3C2410_RTCALM_ALMEN; writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); + + return 0; } static int s3c_rtc_setpie(struct device *dev, int enabled) @@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) writeb(alrm_en, base + S3C2410_RTCALM); - s3c_rtc_setaie(alrm->enabled); + s3c_rtc_setaie(dev, alrm->enabled); return 0; } @@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) rtc_device_unregister(rtc); s3c_rtc_setpie(&dev->dev, 0); - s3c_rtc_setaie(0); + s3c_rtc_setaie(&dev->dev, 0); clk_disable(rtc_clk); clk_put(rtc_clk); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 318672d05563..a9fe23d5bd0f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline; static struct ccw_device_id dasd_eckd_ids[] = { { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, - { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, + { 
CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index c881a14fa5dd..1f6a4d894e73 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -62,8 +62,8 @@ static int xpram_devs; /* * Parameter parsing functions. */ -static int __initdata devs = XPRAM_DEVS; -static char __initdata *sizes[XPRAM_MAX_DEVS]; +static int devs = XPRAM_DEVS; +static char *sizes[XPRAM_MAX_DEVS]; module_param(devs, int, 0); module_param_array(sizes, charp, NULL, 0); diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 8cd58e412b5e..5ad44daef73b 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp; - int ct, perm; + unsigned int ct; + int perm; argp = (void __user *)arg; diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 7a242f073632..267b54e8ff5a 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request) return rc; } +static inline void +tape_do_io_async_free(struct tape_device *device, struct tape_request *request) +{ + request->callback = (void *) tape_free_request; + request->callback_data = NULL; + tape_do_io_async(device, request); +} + extern int tape_oper_handler(int irq, int status); extern void tape_noper_handler(int irq, int status); extern int tape_open(struct tape_device *); diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index c17f35b6136a..c26511171ffe 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int); * Medium sense for 34xx tapes. There is no 'real' medium sense call. * So we just do a normal sense. 
*/ -static int -tape_34xx_medium_sense(struct tape_device *device) +static void __tape_34xx_medium_sense(struct tape_request *request) { - struct tape_request *request; - unsigned char *sense; - int rc; - - request = tape_alloc_request(1, 32); - if (IS_ERR(request)) { - DBF_EXCEPTION(6, "MSEN fail\n"); - return PTR_ERR(request); - } - - request->op = TO_MSEN; - tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + struct tape_device *device = request->device; + unsigned char *sense; - rc = tape_do_io_interruptible(device, request); if (request->rc == 0) { sense = request->cpdata; @@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device) device->tape_generic_status |= GMT_WR_PROT(~0); else device->tape_generic_status &= ~GMT_WR_PROT(~0); - } else { + } else DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", request->rc); - } tape_free_request(request); +} + +static int tape_34xx_medium_sense(struct tape_device *device) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(1, 32); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "MSEN fail\n"); + return PTR_ERR(request); + } + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + rc = tape_do_io_interruptible(device, request); + __tape_34xx_medium_sense(request); return rc; } +static void tape_34xx_medium_sense_async(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(1, 32); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "MSEN fail\n"); + return; + } + + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + request->callback = (void *) __tape_34xx_medium_sense; + request->callback_data = NULL; + tape_do_io_async(device, request); +} + struct tape_34xx_work { struct tape_device *device; enum tape_op op; @@ -109,6 +129,9 @@ struct tape_34xx_work { * is inserted but cannot call tape_do_io* from an interrupt context. * Maybe that's useful for other actions we want to start from the * interrupt handler. + * Note: the work handler is called by the system work queue. The tape + * commands started by the handler need to be asynchronous, otherwise + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/ static void tape_34xx_work_handler(struct work_struct *work) @@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work) switch(p->op) { case TO_MSEN: - tape_34xx_medium_sense(device); + tape_34xx_medium_sense_async(device); break; default: DBF_EVENT(3, "T34XX: internal error: unknown work\n"); diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index fbe361fcd2c0..de2e99e0a71b 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -329,17 +329,17 @@ out: /* * Enable encryption */ -static int tape_3592_enable_crypt(struct tape_device *device) +static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_enable_crypt\n"); if (!crypt_supported(device)) - return -ENOSYS; + return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) - return PTR_ERR(request); + return request; data = request->cpdata; memset(data,0,72); @@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device) request->op = TO_CRYPT_ON; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return request; +} + +static int tape_3592_enable_crypt(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_enable_crypt(device); + if (IS_ERR(request)) + return PTR_ERR(request); return tape_do_io_free(device, request); } +static void tape_3592_enable_crypt_async(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_enable_crypt(device); + if (!IS_ERR(request)) + tape_do_io_async_free(device, request); +} + /* * Disable encryption */ -static int tape_3592_disable_crypt(struct tape_device *device) +static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_disable_crypt\n"); if (!crypt_supported(device)) - return -ENOSYS; + return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) - return PTR_ERR(request); + return request; data = request->cpdata; memset(data,0,72); @@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device) tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return request; +} + +static int tape_3592_disable_crypt(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_disable_crypt(device); + if (IS_ERR(request)) + return PTR_ERR(request); return tape_do_io_free(device, request); } +static void tape_3592_disable_crypt_async(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_disable_crypt(device); + if (!IS_ERR(request)) + tape_do_io_async_free(device, request); +} + /* * IOCTL: Set encryption status */ @@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) /* * SENSE Medium: Get Sense data about medium state */ -static int -tape_3590_sense_medium(struct tape_device *device) +static int tape_3590_sense_medium(struct tape_device *device) { struct tape_request *request; @@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device) return tape_do_io_free(device, request); } +static void tape_3590_sense_medium_async(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(1, 128); + if (IS_ERR(request)) + return; + request->op = 
TO_MSEN; + tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); + tape_do_io_async_free(device, request); +} + /* * MTTELL: Tell block. Return the number of block relative to current file. */ @@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device, * 2. The attention msg is written to the "read subsystem data" buffer. * In this case we probably should print it to the console. */ -static int -tape_3590_read_attmsg(struct tape_device *device) +static void tape_3590_read_attmsg_async(struct tape_device *device) { struct tape_request *request; char *buf; request = tape_alloc_request(3, 4096); if (IS_ERR(request)) - return PTR_ERR(request); + return; request->op = TO_READ_ATTMSG; buf = request->cpdata; buf[0] = PREP_RD_SS_DATA; @@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device) tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); - return tape_do_io_free(device, request); + tape_do_io_async_free(device, request); } /* * These functions are used to schedule follow-up actions from within an * interrupt context (like unsolicited interrupts). + * Note: the work handler is called by the system work queue. The tape + * commands started by the handler need to be asynchronous, otherwise + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). */ struct work_handler_data { struct tape_device *device; @@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work) switch (p->op) { case TO_MSEN: - tape_3590_sense_medium(p->device); + tape_3590_sense_medium_async(p->device); break; case TO_READ_ATTMSG: - tape_3590_read_attmsg(p->device); + tape_3590_read_attmsg_async(p->device); break; case TO_CRYPT_ON: - tape_3592_enable_crypt(p->device); + tape_3592_enable_crypt_async(p->device); break; case TO_CRYPT_OFF: - tape_3592_disable_crypt(p->device); + tape_3592_disable_crypt_async(p->device); break; default: DBF_EVENT(3, "T3590: work handler undefined for " diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 44578b56ad0a..d3e58d763b43 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) { struct Scsi_Host *host = rport_to_shost(rport); fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + unsigned long flags; if (!fcport) return; @@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) * Transport has effectively 'deleted' the rport, clear * all local references. */ - spin_lock_irq(host->host_lock); + spin_lock_irqsave(host->host_lock, flags); fcport->rport = fcport->drport = NULL; *((fc_port_t **)rport->dd_data) = NULL; - spin_unlock_irq(host->host_lock); + spin_unlock_irqrestore(host->host_lock, flags); if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f948e1a73aec..d9479c3fe5f8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; + unsigned long flags; - spin_lock_irq(fcport->vha->host->host_lock); + spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ?
fcport->drport: fcport->rport; fcport->drport = NULL; - spin_unlock_irq(fcport->vha->host->host_lock); + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); } @@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) struct fc_rport_identifiers rport_ids; struct fc_rport *rport; struct qla_hw_data *ha = vha->hw; + unsigned long flags; qla2x00_rport_del(fcport); @@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port!\n"); return; } - spin_lock_irq(fcport->vha->host->host_lock); + spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; - spin_unlock_irq(fcport->vha->host->host_lock); + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); rport->supported_classes = fcport->supported_classes; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1fb..f27724d76cf6 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) } if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || - atomic_read(&fcport->state) == FCS_DEVICE_LOST || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; @@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, { struct fc_rport *rport; scsi_qla_host_t *base_vha; + unsigned long flags; if (!fcport->rport) return; @@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, rport = fcport->rport; if (defer) { base_vha = pci_get_drvdata(vha->hw->pdev); - spin_lock_irq(vha->host->host_lock); + spin_lock_irqsave(vha->host->host_lock, flags); fcport->drport = rport; - spin_unlock_irq(vha->host->host_lock); + spin_unlock_irqrestore(vha->host->host_lock, flags); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } else @@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data) set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { DEBUG3(printk("qla2x00: DPC handler sleeping\n")); - set_current_state(TASK_INTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); @@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data) qla2x00_do_dpc_all_vps(base_vha); ha->dpc_active = 0; + set_current_state(TASK_INTERRUPTIBLE); } /* End of while(1) */ + __set_current_state(TASK_RUNNING); DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b310934efed..a6b2d72022fc 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd, unsigned long long lba, unsigned int num, int write) { int ret; - unsigned int block, rest = 0; + unsigned long long block, rest = 0; int (*func)(struct scsi_cmnd *, unsigned char *, int); func = write ? 
fetch_to_dev_buffer : fill_from_dev_buffer; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9045c52abd25..fb2bb35c62cb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) &sdev->request_queue->queue_flags); if (flagset) queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); - __blk_run_queue(sdev->request_queue); + __blk_run_queue(sdev->request_queue, false); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); spin_unlock(sdev->request_queue->queue_lock); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 998c01be3234..5c3ccfc6b622 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); if (flagset) queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); - __blk_run_queue(rport->rqst_q); + __blk_run_queue(rport->rqst_q, false); if (flagset) queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c index 351d8a375b57..19752b09e155 100644 --- a/drivers/spi/pxa2xx_spi_pci.c +++ b/drivers/spi/pxa2xx_spi_pci.c @@ -7,10 +7,9 @@ #include <linux/of_device.h> #include <linux/spi/pxa2xx_spi.h> -struct awesome_struct { +struct ce4100_info { struct ssp_device ssp; - struct platform_device spi_pdev; - struct pxa2xx_spi_master spi_pdata; + struct platform_device *spi_pdev; }; static DEFINE_MUTEX(ssp_lock); @@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp) } EXPORT_SYMBOL_GPL(pxa_ssp_free); -static void plat_dev_release(struct device *dev) -{ - struct awesome_struct *as = container_of(dev, - struct awesome_struct, spi_pdev.dev); - - of_device_node_put(&as->spi_pdev.dev); -} - static int __devinit ce4100_spi_probe(struct pci_dev *dev, const struct pci_device_id *ent) { int ret; resource_size_t phys_beg; resource_size_t phys_len; - struct awesome_struct *spi_info; + struct ce4100_info *spi_info; struct platform_device *pdev; - struct pxa2xx_spi_master *spi_pdata; + struct pxa2xx_spi_master spi_pdata; struct ssp_device *ssp; ret = pci_enable_device(dev); @@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, return ret; } + pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); - if (!spi_info) { + if (!pdev || !spi_info ) { ret = -ENOMEM; - goto err_kz; + goto err_nomem; } - ssp = &spi_info->ssp; - pdev = &spi_info->spi_pdev; - spi_pdata = &spi_info->spi_pdata; + memset(&spi_pdata, 0, sizeof(spi_pdata)); + spi_pdata.num_chipselect = dev->devfn; - pdev->name = "pxa2xx-spi"; - pdev->id = dev->devfn; - pdev->dev.parent = &dev->dev; - pdev->dev.platform_data = &spi_info->spi_pdata; + ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); + if (ret) + goto err_nomem; + pdev->dev.parent = &dev->dev; #ifdef CONFIG_OF pdev->dev.of_node = dev->dev.of_node; #endif - pdev->dev.release = plat_dev_release; - - spi_pdata->num_chipselect = dev->devfn; - + ssp = &spi_info->ssp; ssp->phys_base = pci_resource_start(dev, 0); ssp->mmio_base = ioremap(phys_beg, phys_len); if (!ssp->mmio_base) { dev_err(&pdev->dev, "failed to ioremap() registers\n"); ret = -EIO; - goto err_remap; + goto err_nomem; } ssp->irq = dev->irq; ssp->port_id = pdev->id; @@ -122,7 +110,7 @@ static int __devinit 
ce4100_spi_probe(struct pci_dev *dev, pci_set_drvdata(dev, spi_info); - ret = platform_device_register(pdev); + ret = platform_device_add(pdev); if (ret) goto err_dev_add; @@ -135,27 +123,21 @@ err_dev_add: mutex_unlock(&ssp_lock); iounmap(ssp->mmio_base); -err_remap: - kfree(spi_info); - -err_kz: +err_nomem: release_mem_region(phys_beg, phys_len); - + platform_device_put(pdev); + kfree(spi_info); return ret; } static void __devexit ce4100_spi_remove(struct pci_dev *dev) { - struct awesome_struct *spi_info; - struct platform_device *pdev; + struct ce4100_info *spi_info; struct ssp_device *ssp; spi_info = pci_get_drvdata(dev); - ssp = &spi_info->ssp; - pdev = &spi_info->spi_pdev; - - platform_device_unregister(pdev); + platform_device_unregister(spi_info->spi_pdev); iounmap(ssp->mmio_base); release_mem_region(pci_resource_start(dev, 0), @@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev) } static struct pci_device_id ce4100_spi_devices[] __devinitdata = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, { }, }; diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 5cfd70819f08..973bb190ef57 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \ target_core_transport.o \ target_core_cdb.o \ target_core_ua.o \ - target_core_rd.o \ - target_core_mib.o + target_core_rd.o obj-$(CONFIG_TARGET_CORE) += target_core_mod.o diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2764510798b0..caf8dc18ee0a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -37,7 +37,6 @@ #include <linux/parser.h> #include <linux/syscalls.h> #include <linux/configfs.h> -#include <linux/proc_fs.h> #include <target/target_core_base.h> #include <target/target_core_device.h> @@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item) { struct se_subsystem_dev *se_dev = container_of(to_config_group(item), struct se_subsystem_dev, se_dev_group); - struct config_group *dev_cg; - - if (!(se_dev)) - return; + struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); + struct se_subsystem_api *t = hba->transport; + struct config_group *dev_cg = &se_dev->se_dev_group; - dev_cg = &se_dev->se_dev_group; kfree(dev_cg->default_groups); + /* + * This pointer will set when the storage is enabled with: + *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` + */ + if (se_dev->se_dev_ptr) { + printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" + "virtual_device() for se_dev_ptr: %p\n", + se_dev->se_dev_ptr); + + se_free_virtual_device(se_dev->se_dev_ptr, hba); + } else { + /* + * Release struct se_subsystem_dev->se_dev_su_ptr.. 
+ */ + printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" + "device() for se_dev_su_ptr: %p\n", + se_dev->se_dev_su_ptr); + + t->free_device(se_dev->se_dev_su_ptr); + } + + printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" + "_dev_t: %p\n", se_dev); + kfree(se_dev); } static ssize_t target_core_dev_show(struct config_item *item, @@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { NULL, }; +static void target_core_alua_lu_gp_release(struct config_item *item) +{ + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), + struct t10_alua_lu_gp, lu_gp_group); + + core_alua_free_lu_gp(lu_gp); +} + static struct configfs_item_operations target_core_alua_lu_gp_ops = { + .release = target_core_alua_lu_gp_release, .show_attribute = target_core_alua_lu_gp_attr_show, .store_attribute = target_core_alua_lu_gp_attr_store, }; @@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp( printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" " Group: core/alua/lu_gps/%s, ID: %hu\n", config_item_name(item), lu_gp->lu_gp_id); - + /* + * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() + * -> target_core_alua_lu_gp_release() + */ config_item_put(item); - core_alua_free_lu_gp(lu_gp); } static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { @@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { NULL, }; +static void target_core_alua_tg_pt_gp_release(struct config_item *item) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), + struct t10_alua_tg_pt_gp, tg_pt_gp_group); + + core_alua_free_tg_pt_gp(tg_pt_gp); +} + static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { + .release = target_core_alua_tg_pt_gp_release, .show_attribute = target_core_alua_tg_pt_gp_attr_show, .store_attribute = target_core_alua_tg_pt_gp_attr_store, }; @@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp( printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" " Group: alua/tg_pt_gps/%s, ID: %hu\n", config_item_name(item), tg_pt_gp->tg_pt_gp_id); - + /* + * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() + * -> target_core_alua_tg_pt_gp_release(). + */ config_item_put(item); - core_alua_free_tg_pt_gp(tg_pt_gp); } static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { @@ -2771,13 +2814,11 @@ static void target_core_drop_subdev( struct se_subsystem_api *t; struct config_item *df_item; struct config_group *dev_cg, *tg_pt_gp_cg; - int i, ret; + int i; hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); - if (mutex_lock_interruptible(&hba->hba_access_mutex)) - goto out; - + mutex_lock(&hba->hba_access_mutex); t = hba->transport; spin_lock(&se_global->g_device_lock); @@ -2791,7 +2832,10 @@ static void target_core_drop_subdev( config_item_put(df_item); } kfree(tg_pt_gp_cg->default_groups); - core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); + /* + * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp + * directly from target_core_alua_tg_pt_gp_release(). 
+ */ T10_ALUA(se_dev)->default_tg_pt_gp = NULL; dev_cg = &se_dev->se_dev_group; @@ -2800,38 +2844,12 @@ static void target_core_drop_subdev( dev_cg->default_groups[i] = NULL; config_item_put(df_item); } - - config_item_put(item); /* - * This pointer will set when the storage is enabled with: - * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` + * The releasing of se_dev and associated se_dev->se_dev_ptr is done + * from target_core_dev_item_ops->release() ->target_core_dev_release(). */ - if (se_dev->se_dev_ptr) { - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" - "virtual_device() for se_dev_ptr: %p\n", - se_dev->se_dev_ptr); - - ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); - if (ret < 0) - goto hba_out; - } else { - /* - * Release struct se_subsystem_dev->se_dev_su_ptr.. - */ - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" - "device() for se_dev_su_ptr: %p\n", - se_dev->se_dev_su_ptr); - - t->free_device(se_dev->se_dev_su_ptr); - } - - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" - "_dev_t: %p\n", se_dev); - -hba_out: + config_item_put(item); mutex_unlock(&hba->hba_access_mutex); -out: - kfree(se_dev); } static struct configfs_group_operations target_core_hba_group_ops = { @@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); +static void target_core_hba_release(struct config_item *item) +{ + struct se_hba *hba = container_of(to_config_group(item), + struct se_hba, hba_group); + core_delete_hba(hba); +} + static struct configfs_attribute *target_core_hba_attrs[] = { &target_core_hba_hba_info.attr, &target_core_hba_hba_mode.attr, @@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = { }; static struct configfs_item_operations target_core_hba_item_ops = { + .release = target_core_hba_release, .show_attribute = target_core_hba_attr_show, .store_attribute = target_core_hba_attr_store, }; @@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget( struct config_group *group, struct config_item *item) { - struct se_hba *hba = item_to_hba(item); - + /* + * core_delete_hba() is called from target_core_hba_item_ops->release() + * -> target_core_hba_release() + */ config_item_put(item); - core_delete_hba(hba); } static struct configfs_group_operations target_core_group_ops = { @@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void) struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; struct configfs_subsystem *subsys; - struct proc_dir_entry *scsi_target_proc = NULL; struct t10_alua_lu_gp *lu_gp; int ret; @@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void) if (core_dev_setup_virtual_lun0() < 0) goto out; - scsi_target_proc = proc_mkdir("scsi_target", 0); - if (!(scsi_target_proc)) { - printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); - goto out; - } - ret = init_scsi_target_mib(); - if (ret < 0) - goto out; - return 0; out: configfs_unregister_subsystem(subsys); - if (scsi_target_proc) - remove_proc_entry("scsi_target", 0); core_dev_release_virtual_lun0(); rd_module_exit(); out_global: @@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void) config_item_put(item); } kfree(lu_gp_cg->default_groups); - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + lu_gp_cg->default_groups = NULL; alua_cg = &se_global->alua_group; for (i = 0; alua_cg->default_groups[i]; i++) { @@ -3188,6 +3202,7 @@ static void 
target_core_exit_configfs(void) config_item_put(item); } kfree(alua_cg->default_groups); + alua_cg->default_groups = NULL; hba_cg = &se_global->target_core_hbagroup; for (i = 0; hba_cg->default_groups[i]; i++) { @@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void) config_item_put(item); } kfree(hba_cg->default_groups); - - for (i = 0; subsys->su_group.default_groups[i]; i++) { - item = &subsys->su_group.default_groups[i]->cg_item; - subsys->su_group.default_groups[i] = NULL; - config_item_put(item); - } + hba_cg->default_groups = NULL; + /* + * We expect subsys->su_group.default_groups to be released + * by configfs subsystem provider logic.. + */ + configfs_unregister_subsystem(subsys); kfree(subsys->su_group.default_groups); - configfs_unregister_subsystem(subsys); + core_alua_free_lu_gp(se_global->default_lu_gp); + se_global->default_lu_gp = NULL; + printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); - remove_scsi_target_mib(); - remove_proc_entry("scsi_target", 0); core_dev_release_virtual_lun0(); rd_module_exit(); release_se_global(); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 317ce58d426d..5da051a07fa3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -373,11 +373,11 @@ int core_update_device_list_for_node( /* * deve->se_lun_acl will be NULL for demo-mode created LUNs * that have not been explictly concerted to MappedLUNs -> - * struct se_lun_acl. + * struct se_lun_acl, but we remove deve->alua_port_list from + * port->sep_alua_list. This also means that active UAs and + * NodeACL context specific PR metadata for demo-mode + * MappedLUN *deve will be released below.. */ - if (!(deve->se_lun_acl)) - return 0; - spin_lock_bh(&port->sep_alua_lock); list_del(&deve->alua_port_list); spin_unlock_bh(&port->sep_alua_lock); @@ -395,12 +395,14 @@ int core_update_device_list_for_node( printk(KERN_ERR "struct se_dev_entry->se_lun_acl" " already set for demo mode -> explict" " LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); return -1; } if (deve->se_lun != lun) { printk(KERN_ERR "struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" " -> explict LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); return -1; } deve->se_lun_acl = lun_acl; @@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev) } } spin_unlock(&hba->device_lock); - - while (atomic_read(&hba->dev_mib_access_count)) - cpu_relax(); } int se_dev_check_online(struct se_device *dev) diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 32b148d7e261..b65d1c8e7740 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); +static void target_fabric_mappedlun_release(struct config_item *item) +{ + struct se_lun_acl *lacl = container_of(to_config_group(item), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; + + core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { &target_fabric_mappedlun_write_protect.attr, NULL, }; static struct configfs_item_operations target_fabric_mappedlun_item_ops = { + .release = target_fabric_mappedlun_release, .show_attribute = 
target_fabric_mappedlun_attr_show, .store_attribute = target_fabric_mappedlun_attr_store, .allow_link = target_fabric_mappedlun_link, @@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun( struct config_group *group, struct config_item *item) { - struct se_lun_acl *lacl = container_of(to_config_group(item), - struct se_lun_acl, se_lun_group); - struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; - config_item_put(item); - core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + +static void target_fabric_nacl_base_release(struct config_item *item) +{ + struct se_node_acl *se_nacl = container_of(to_config_group(item), + struct se_node_acl, acl_group); + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + tf->tf_ops.fabric_drop_nodeacl(se_nacl); } static struct configfs_item_operations target_fabric_nacl_base_item_ops = { + .release = target_fabric_nacl_base_release, .show_attribute = target_fabric_nacl_base_attr_show, .store_attribute = target_fabric_nacl_base_attr_store, }; @@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl( struct config_group *group, struct config_item *item) { - struct se_portal_group *se_tpg = container_of(group, - struct se_portal_group, tpg_acl_group); - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_node_acl *se_nacl = container_of(to_config_group(item), struct se_node_acl, acl_group); struct config_item *df_item; @@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl( nacl_cg->default_groups[i] = NULL; config_item_put(df_item); } - + /* + * struct se_node_acl free is done in target_fabric_nacl_base_release() + */ config_item_put(item); - tf->tf_ops.fabric_drop_nodeacl(se_nacl); } static struct configfs_group_operations target_fabric_nacl_group_ops = { @@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); +static void target_fabric_np_base_release(struct config_item *item) +{ + struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), + struct se_tpg_np, tpg_np_group); + struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + tf->tf_ops.fabric_drop_np(se_tpg_np); +} + static struct configfs_item_operations target_fabric_np_base_item_ops = { + .release = target_fabric_np_base_release, .show_attribute = target_fabric_np_base_attr_show, .store_attribute = target_fabric_np_base_attr_store, }; @@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np( if (!(se_tpg_np) || IS_ERR(se_tpg_np)) return ERR_PTR(-EINVAL); + se_tpg_np->tpg_np_parent = se_tpg; config_group_init_type_name(&se_tpg_np->tpg_np_group, name, &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); @@ -476,14 +502,10 @@ static void target_fabric_drop_np( struct config_group *group, struct config_item *item) { - struct se_portal_group *se_tpg = container_of(group, - struct se_portal_group, tpg_np_group); - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; - struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), - struct se_tpg_np, tpg_np_group); - + /* + * struct se_tpg_np is released via target_fabric_np_base_release() + */ config_item_put(item); - tf->tf_ops.fabric_drop_np(se_tpg_np); } static struct configfs_group_operations target_fabric_np_group_ops = { @@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); */ 
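
The target_core_configfs.c and target_core_fabric_configfs.c hunks above move object teardown out of the drop_item() callbacks and into configfs_item_operations->release(), so memory is only freed once the final config_item reference goes away. A minimal sketch of that pattern; the my_obj/my_group names are illustrative and not taken from the patch:

#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative configfs-backed object; not a structure from the patch. */
struct my_obj {
    struct config_group group;
    /* ... driver state ... */
};

static void my_obj_release(struct config_item *item)
{
    struct my_obj *obj = container_of(to_config_group(item),
                                      struct my_obj, group);

    /* Runs only after the last reference to the item is dropped. */
    kfree(obj);
}

static struct configfs_item_operations my_obj_item_ops = {
    .release = my_obj_release,
};

static struct config_item_type my_obj_type = {
    .ct_item_ops = &my_obj_item_ops,
    .ct_owner    = THIS_MODULE,
};

static void my_group_drop_item(struct config_group *group,
                               struct config_item *item)
{
    /*
     * No explicit free here: dropping the parent's reference lets
     * configfs invoke ->release() once all other holders (open
     * attribute files, symlinks) are gone.
     */
    config_item_put(item);
}
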
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); +static void target_fabric_tpg_release(struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct target_fabric_configfs *tf = wwn->wwn_tf; + + tf->tf_ops.fabric_drop_tpg(se_tpg); +} + static struct configfs_item_operations target_fabric_tpg_base_item_ops = { + .release = target_fabric_tpg_release, .show_attribute = target_fabric_tpg_attr_show, .store_attribute = target_fabric_tpg_attr_store, }; @@ -872,8 +905,6 @@ static void target_fabric_drop_tpg( struct config_group *group, struct config_item *item) { - struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); - struct target_fabric_configfs *tf = wwn->wwn_tf; struct se_portal_group *se_tpg = container_of(to_config_group(item), struct se_portal_group, tpg_group); struct config_group *tpg_cg = &se_tpg->tpg_group; @@ -890,15 +921,28 @@ static void target_fabric_drop_tpg( } config_item_put(item); - tf->tf_ops.fabric_drop_tpg(se_tpg); } +static void target_fabric_release_wwn(struct config_item *item) +{ + struct se_wwn *wwn = container_of(to_config_group(item), + struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + + tf->tf_ops.fabric_drop_wwn(wwn); +} + +static struct configfs_item_operations target_fabric_tpg_item_ops = { + .release = target_fabric_release_wwn, +}; + static struct configfs_group_operations target_fabric_tpg_group_ops = { .make_group = target_fabric_make_tpg, .drop_item = target_fabric_drop_tpg, }; -TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); +TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, + NULL); /* End of tfc_tpg_cit */ @@ -932,13 +976,7 @@ static void target_fabric_drop_wwn( struct config_group *group, struct config_item *item) { - struct target_fabric_configfs *tf = container_of(group, - struct target_fabric_configfs, tf_group); - struct se_wwn *wwn = container_of(to_config_group(item), - struct se_wwn, wwn_group); - config_item_put(item); - tf->tf_ops.fabric_drop_wwn(wwn); } static struct configfs_group_operations target_fabric_wwn_group_ops = { diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c6e0d757e76e..67f0c09983c8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice( bd = blkdev_get_by_path(ib_dev->ibd_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); - if (!(bd)) + if (IS_ERR(bd)) goto failed; /* * Setup the local scope queue_limits from struct request_queue->limits @@ -220,8 +220,10 @@ static void iblock_free_device(void *p) { struct iblock_dev *ib_dev = p; - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); - bioset_free(ib_dev->ibd_bio_set); + if (ib_dev->ibd_bd != NULL) + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + if (ib_dev->ibd_bio_set != NULL) + bioset_free(ib_dev->ibd_bio_set); kfree(ib_dev); } diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c deleted file mode 100644 index d5a48aa0d2d1..000000000000 --- a/drivers/target/target_core_mib.c +++ /dev/null @@ -1,1078 +0,0 @@ -/******************************************************************************* - * Filename: target_core_mib.c - * - * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. 
- * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger <nab@linux-iscsi.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ******************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/version.h> -#include <generated/utsrelease.h> -#include <linux/utsname.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/blkdev.h> -#include <scsi/scsi.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_host.h> - -#include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> -#include <target/target_core_configfs.h> - -#include "target_core_hba.h" -#include "target_core_mib.h" - -/* SCSI mib table index */ -static struct scsi_index_table scsi_index_table; - -#ifndef INITIAL_JIFFIES -#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) -#endif - -/* SCSI Instance Table */ -#define SCSI_INST_SW_INDEX 1 -#define SCSI_TRANSPORT_INDEX 1 - -#define NONE "None" -#define ISPRINT(a) ((a >= ' ') && (a <= '~')) - -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return list->prev == head; -} - -static void *locate_hba_start( - struct seq_file *seq, - loff_t *pos) -{ - spin_lock(&se_global->g_device_lock); - return seq_list_start(&se_global->g_se_dev_list, *pos); -} - -static void *locate_hba_next( - struct seq_file *seq, - void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_dev_list, pos); -} - -static void locate_hba_stop(struct seq_file *seq, void *v) -{ - spin_unlock(&se_global->g_device_lock); -} - -/**************************************************************************** - * SCSI MIB Tables - ****************************************************************************/ - -/* - * SCSI Instance Table - */ -static void *scsi_inst_seq_start( - struct seq_file *seq, - loff_t *pos) -{ - spin_lock(&se_global->hba_lock); - return seq_list_start(&se_global->g_hba_list, *pos); -} - -static void *scsi_inst_seq_next( - struct seq_file *seq, - void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_hba_list, pos); -} - -static void scsi_inst_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock(&se_global->hba_lock); -} - -static int scsi_inst_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba = list_entry(v, struct se_hba, hba_list); - - if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) - seq_puts(seq, "inst sw_indx\n"); - - seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); - seq_printf(seq, "plugin: %s version: %s\n", - hba->transport->name, TARGET_CORE_VERSION); - - return 0; -} - -static 
const struct seq_operations scsi_inst_seq_ops = { - .start = scsi_inst_seq_start, - .next = scsi_inst_seq_next, - .stop = scsi_inst_seq_stop, - .show = scsi_inst_seq_show -}; - -static int scsi_inst_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_inst_seq_ops); -} - -static const struct file_operations scsi_inst_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_inst_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Device Table - */ -static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_dev_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_dev_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - char str[28]; - int k; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst indx role ports\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - seq_printf(seq, "%u %u %s %u\n", hba->hba_index, - dev->dev_index, "Target", dev->dev_port_count); - - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - - /* vendor */ - for (k = 0; k < 8; k++) - str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? - DEV_T10_WWN(dev)->vendor[k] : 0x20; - str[k] = 0x20; - - /* model */ - for (k = 0; k < 16; k++) - str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? - DEV_T10_WWN(dev)->model[k] : 0x20; - str[k + 9] = 0; - - seq_printf(seq, "dev_alias: %s\n", str); - - return 0; -} - -static const struct seq_operations scsi_dev_seq_ops = { - .start = scsi_dev_seq_start, - .next = scsi_dev_seq_next, - .stop = scsi_dev_seq_stop, - .show = scsi_dev_seq_show -}; - -static int scsi_dev_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_dev_seq_ops); -} - -static const struct file_operations scsi_dev_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_dev_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Port Table - */ -static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_port_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *sep, *sep_tmp; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx role busy_count\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? 
*/ - return 0; - } - - /* FIXME: scsiPortBusyStatuses count */ - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { - seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, - dev->dev_index, sep->sep_index, "Device", - dev->dev_index, 0); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_port_seq_ops = { - .start = scsi_port_seq_start, - .next = scsi_port_seq_next, - .stop = scsi_port_seq_stop, - .show = scsi_port_seq_show -}; - -static int scsi_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_port_seq_ops); -} - -static const struct file_operations scsi_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Transport Table - */ -static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_transport_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_transport_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *se, *se_tmp; - struct se_portal_group *tpg; - struct t10_wwn *wwn; - char buf[64]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx dev_name\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - wwn = DEV_T10_WWN(dev); - - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { - tpg = se->sep_tpg; - sprintf(buf, "scsiTransport%s", - TPG_TFO(tpg)->get_fabric_name()); - - seq_printf(seq, "%u %s %u %s+%s\n", - hba->hba_index, /* scsiTransportIndex */ - buf, /* scsiTransportType */ - (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? - TPG_TFO(tpg)->tpg_get_inst_index(tpg) : - 0, - TPG_TFO(tpg)->tpg_get_wwn(tpg), - (strlen(wwn->unit_serial)) ? 
- /* scsiTransportDevName */ - wwn->unit_serial : wwn->vendor); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_transport_seq_ops = { - .start = scsi_transport_seq_start, - .next = scsi_transport_seq_next, - .stop = scsi_transport_seq_stop, - .show = scsi_transport_seq_show -}; - -static int scsi_transport_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_transport_seq_ops); -} - -static const struct file_operations scsi_transport_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_transport_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Target Device Table - */ -static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - - -#define LU_COUNT 1 /* for now */ -static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - int non_accessible_lus = 0; - char status[16]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst indx num_LUs status non_access_LUs" - " resets\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - switch (dev->dev_status) { - case TRANSPORT_DEVICE_ACTIVATED: - strcpy(status, "activated"); - break; - case TRANSPORT_DEVICE_DEACTIVATED: - strcpy(status, "deactivated"); - non_accessible_lus = 1; - break; - case TRANSPORT_DEVICE_SHUTDOWN: - strcpy(status, "shutdown"); - non_accessible_lus = 1; - break; - case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: - case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: - strcpy(status, "offline"); - non_accessible_lus = 1; - break; - default: - sprintf(status, "unknown(%d)", dev->dev_status); - non_accessible_lus = 1; - } - - seq_printf(seq, "%u %u %u %s %u %u\n", - hba->hba_index, dev->dev_index, LU_COUNT, - status, non_accessible_lus, dev->num_resets); - - return 0; -} - -static const struct seq_operations scsi_tgt_dev_seq_ops = { - .start = scsi_tgt_dev_seq_start, - .next = scsi_tgt_dev_seq_next, - .stop = scsi_tgt_dev_seq_stop, - .show = scsi_tgt_dev_seq_show -}; - -static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_tgt_dev_seq_ops); -} - -static const struct file_operations scsi_tgt_dev_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_tgt_dev_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Target Port Table - */ -static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *sep, *sep_tmp; - struct se_portal_group *tpg; - u32 rx_mbytes, tx_mbytes; - 
unsigned long long num_cmds; - char buf[64]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx name port_index in_cmds" - " write_mbytes read_mbytes hs_in_cmds\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { - tpg = sep->sep_tpg; - sprintf(buf, "%sPort#", - TPG_TFO(tpg)->get_fabric_name()); - - seq_printf(seq, "%u %u %u %s%d %s%s%d ", - hba->hba_index, - dev->dev_index, - sep->sep_index, - buf, sep->sep_index, - TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", - TPG_TFO(tpg)->tpg_get_tag(tpg)); - - spin_lock(&sep->sep_lun->lun_sep_lock); - num_cmds = sep->sep_stats.cmd_pdus; - rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); - tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); - spin_unlock(&sep->sep_lun->lun_sep_lock); - - seq_printf(seq, "%llu %u %u %u\n", num_cmds, - rx_mbytes, tx_mbytes, 0); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_tgt_port_seq_ops = { - .start = scsi_tgt_port_seq_start, - .next = scsi_tgt_port_seq_next, - .stop = scsi_tgt_port_seq_stop, - .show = scsi_tgt_port_seq_show -}; - -static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_tgt_port_seq_ops); -} - -static const struct file_operations scsi_tgt_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_tgt_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Authorized Initiator Table: - * It contains the SCSI Initiators authorized to be attached to one of the - * local Target ports. - * Iterates through all active TPGs and extracts the info from the ACLs - */ -static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) -{ - spin_lock_bh(&se_global->se_tpg_lock); - return seq_list_start(&se_global->g_se_tpg_list, *pos); -} - -static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_tpg_list, pos); -} - -static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock_bh(&se_global->se_tpg_lock); -} - -static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) -{ - struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, - se_tpg_list); - struct se_dev_entry *deve; - struct se_lun *lun; - struct se_node_acl *se_nacl; - int j; - - if (list_is_first(&se_tpg->se_tpg_list, - &se_global->g_se_tpg_list)) - seq_puts(seq, "inst dev port indx dev_or_port intr_name " - "map_indx att_count num_cmds read_mbytes " - "write_mbytes hs_num_cmds creation_time row_status\n"); - - if (!(se_tpg)) - return 0; - - spin_lock(&se_tpg->acl_node_lock); - list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { - - atomic_inc(&se_nacl->mib_ref_count); - smp_mb__after_atomic_inc(); - spin_unlock(&se_tpg->acl_node_lock); - - spin_lock_irq(&se_nacl->device_list_lock); - for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { - deve = &se_nacl->device_list[j]; - if (!(deve->lun_flags & - TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || - (!deve->se_lun)) - continue; - lun = deve->se_lun; - if (!lun->lun_se_dev) - continue; - - seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" - " %u %s\n", - /* scsiInstIndex */ - (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? 
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : - 0, - /* scsiDeviceIndex */ - lun->lun_se_dev->dev_index, - /* scsiAuthIntrTgtPortIndex */ - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), - /* scsiAuthIntrIndex */ - se_nacl->acl_index, - /* scsiAuthIntrDevOrPort */ - 1, - /* scsiAuthIntrName */ - se_nacl->initiatorname[0] ? - se_nacl->initiatorname : NONE, - /* FIXME: scsiAuthIntrLunMapIndex */ - 0, - /* scsiAuthIntrAttachedTimes */ - deve->attach_count, - /* scsiAuthIntrOutCommands */ - deve->total_cmds, - /* scsiAuthIntrReadMegaBytes */ - (u32)(deve->read_bytes >> 20), - /* scsiAuthIntrWrittenMegaBytes */ - (u32)(deve->write_bytes >> 20), - /* FIXME: scsiAuthIntrHSOutCommands */ - 0, - /* scsiAuthIntrLastCreation */ - (u32)(((u32)deve->creation_time - - INITIAL_JIFFIES) * 100 / HZ), - /* FIXME: scsiAuthIntrRowStatus */ - "Ready"); - } - spin_unlock_irq(&se_nacl->device_list_lock); - - spin_lock(&se_tpg->acl_node_lock); - atomic_dec(&se_nacl->mib_ref_count); - smp_mb__after_atomic_dec(); - } - spin_unlock(&se_tpg->acl_node_lock); - - return 0; -} - -static const struct seq_operations scsi_auth_intr_seq_ops = { - .start = scsi_auth_intr_seq_start, - .next = scsi_auth_intr_seq_next, - .stop = scsi_auth_intr_seq_stop, - .show = scsi_auth_intr_seq_show -}; - -static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_auth_intr_seq_ops); -} - -static const struct file_operations scsi_auth_intr_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_auth_intr_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Attached Initiator Port Table: - * It lists the SCSI Initiators attached to one of the local Target ports. - * Iterates through all active TPGs and use active sessions from each TPG - * to list the info fo this table. 
- */ -static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - spin_lock_bh(&se_global->se_tpg_lock); - return seq_list_start(&se_global->g_se_tpg_list, *pos); -} - -static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_tpg_list, pos); -} - -static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock_bh(&se_global->se_tpg_lock); -} - -static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, - se_tpg_list); - struct se_dev_entry *deve; - struct se_lun *lun; - struct se_node_acl *se_nacl; - struct se_session *se_sess; - unsigned char buf[64]; - int j; - - if (list_is_first(&se_tpg->se_tpg_list, - &se_global->g_se_tpg_list)) - seq_puts(seq, "inst dev port indx port_auth_indx port_name" - " port_ident\n"); - - if (!(se_tpg)) - return 0; - - spin_lock(&se_tpg->session_lock); - list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { - if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || - (!se_sess->se_node_acl) || - (!se_sess->se_node_acl->device_list)) - continue; - - atomic_inc(&se_sess->mib_ref_count); - smp_mb__after_atomic_inc(); - se_nacl = se_sess->se_node_acl; - atomic_inc(&se_nacl->mib_ref_count); - smp_mb__after_atomic_inc(); - spin_unlock(&se_tpg->session_lock); - - spin_lock_irq(&se_nacl->device_list_lock); - for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { - deve = &se_nacl->device_list[j]; - if (!(deve->lun_flags & - TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || - (!deve->se_lun)) - continue; - - lun = deve->se_lun; - if (!lun->lun_se_dev) - continue; - - memset(buf, 0, 64); - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) - TPG_TFO(se_tpg)->sess_get_initiator_sid( - se_sess, (unsigned char *)&buf[0], 64); - - seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", - /* scsiInstIndex */ - (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? - TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : - 0, - /* scsiDeviceIndex */ - lun->lun_se_dev->dev_index, - /* scsiPortIndex */ - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), - /* scsiAttIntrPortIndex */ - (TPG_TFO(se_tpg)->sess_get_index != NULL) ? - TPG_TFO(se_tpg)->sess_get_index(se_sess) : - 0, - /* scsiAttIntrPortAuthIntrIdx */ - se_nacl->acl_index, - /* scsiAttIntrPortName */ - se_nacl->initiatorname[0] ? 
- se_nacl->initiatorname : NONE, - /* scsiAttIntrPortIdentifier */ - buf); - } - spin_unlock_irq(&se_nacl->device_list_lock); - - spin_lock(&se_tpg->session_lock); - atomic_dec(&se_nacl->mib_ref_count); - smp_mb__after_atomic_dec(); - atomic_dec(&se_sess->mib_ref_count); - smp_mb__after_atomic_dec(); - } - spin_unlock(&se_tpg->session_lock); - - return 0; -} - -static const struct seq_operations scsi_att_intr_port_seq_ops = { - .start = scsi_att_intr_port_seq_start, - .next = scsi_att_intr_port_seq_next, - .stop = scsi_att_intr_port_seq_stop, - .show = scsi_att_intr_port_seq_show -}; - -static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_att_intr_port_seq_ops); -} - -static const struct file_operations scsi_att_intr_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_att_intr_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Logical Unit Table - */ -static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_lu_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -#define SCSI_LU_INDEX 1 -static int scsi_lu_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - int j; - char str[28]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" - " dev_type status state-bit num_cmds read_mbytes" - " write_mbytes resets full_stat hs_num_cmds creation_time\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - /* Fix LU state, if we can read it from the device */ - seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, - dev->dev_index, SCSI_LU_INDEX, - (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ - (strlen(DEV_T10_WWN(dev)->unit_serial)) ? - /* scsiLuWwnName */ - (char *)&DEV_T10_WWN(dev)->unit_serial[0] : - "None"); - - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - /* scsiLuVendorId */ - for (j = 0; j < 8; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? - DEV_T10_WWN(dev)->vendor[j] : 0x20; - str[8] = 0; - seq_printf(seq, " %s", str); - - /* scsiLuProductId */ - for (j = 0; j < 16; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? - DEV_T10_WWN(dev)->model[j] : 0x20; - str[16] = 0; - seq_printf(seq, " %s", str); - - /* scsiLuRevisionId */ - for (j = 0; j < 4; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? - DEV_T10_WWN(dev)->revision[j] : 0x20; - str[4] = 0; - seq_printf(seq, " %s", str); - - seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", - /* scsiLuPeripheralType */ - TRANSPORT(dev)->get_device_type(dev), - (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 
- "available" : "notavailable", /* scsiLuStatus */ - "exposed", /* scsiLuState */ - (unsigned long long)dev->num_cmds, - /* scsiLuReadMegaBytes */ - (u32)(dev->read_bytes >> 20), - /* scsiLuWrittenMegaBytes */ - (u32)(dev->write_bytes >> 20), - dev->num_resets, /* scsiLuInResets */ - 0, /* scsiLuOutTaskSetFullStatus */ - 0, /* scsiLuHSInCommands */ - (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * - 100 / HZ)); - - return 0; -} - -static const struct seq_operations scsi_lu_seq_ops = { - .start = scsi_lu_seq_start, - .next = scsi_lu_seq_next, - .stop = scsi_lu_seq_stop, - .show = scsi_lu_seq_show -}; - -static int scsi_lu_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_lu_seq_ops); -} - -static const struct file_operations scsi_lu_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_lu_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/****************************************************************************/ - -/* - * Remove proc fs entries - */ -void remove_scsi_target_mib(void) -{ - remove_proc_entry("scsi_target/mib/scsi_inst", NULL); - remove_proc_entry("scsi_target/mib/scsi_dev", NULL); - remove_proc_entry("scsi_target/mib/scsi_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_transport", NULL); - remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); - remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); - remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_lu", NULL); - remove_proc_entry("scsi_target/mib", NULL); -} - -/* - * Create proc fs entries for the mib tables - */ -int init_scsi_target_mib(void) -{ - struct proc_dir_entry *dir_entry; - struct proc_dir_entry *scsi_inst_entry; - struct proc_dir_entry *scsi_dev_entry; - struct proc_dir_entry *scsi_port_entry; - struct proc_dir_entry *scsi_transport_entry; - struct proc_dir_entry *scsi_tgt_dev_entry; - struct proc_dir_entry *scsi_tgt_port_entry; - struct proc_dir_entry *scsi_auth_intr_entry; - struct proc_dir_entry *scsi_att_intr_port_entry; - struct proc_dir_entry *scsi_lu_entry; - - dir_entry = proc_mkdir("scsi_target/mib", NULL); - if (!(dir_entry)) { - printk(KERN_ERR "proc_mkdir() failed.\n"); - return -1; - } - - scsi_inst_entry = - create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); - if (scsi_inst_entry) - scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; - else - goto error; - - scsi_dev_entry = - create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); - if (scsi_dev_entry) - scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; - else - goto error; - - scsi_port_entry = - create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); - if (scsi_port_entry) - scsi_port_entry->proc_fops = &scsi_port_seq_fops; - else - goto error; - - scsi_transport_entry = - create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); - if (scsi_transport_entry) - scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; - else - goto error; - - scsi_tgt_dev_entry = - create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); - if (scsi_tgt_dev_entry) - scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; - else - goto error; - - scsi_tgt_port_entry = - create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); - if (scsi_tgt_port_entry) - scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; - else - goto error; - - scsi_auth_intr_entry = - create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); - if (scsi_auth_intr_entry) - 
scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; - else - goto error; - - scsi_att_intr_port_entry = - create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); - if (scsi_att_intr_port_entry) - scsi_att_intr_port_entry->proc_fops = - &scsi_att_intr_port_seq_fops; - else - goto error; - - scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); - if (scsi_lu_entry) - scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; - else - goto error; - - return 0; - -error: - printk(KERN_ERR "create_proc_entry() failed.\n"); - remove_scsi_target_mib(); - return -1; -} - -/* - * Initialize the index table for allocating unique row indexes to various mib - * tables - */ -void init_scsi_index_table(void) -{ - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); - spin_lock_init(&scsi_index_table.lock); -} - -/* - * Allocate a new row index for the entry type specified - */ -u32 scsi_get_new_index(scsi_index_t type) -{ - u32 new_index; - - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { - printk(KERN_ERR "Invalid index type %d\n", type); - return -1; - } - - spin_lock(&scsi_index_table.lock); - new_index = ++scsi_index_table.scsi_mib_index[type]; - if (new_index == 0) - new_index = ++scsi_index_table.scsi_mib_index[type]; - spin_unlock(&scsi_index_table.lock); - - return new_index; -} -EXPORT_SYMBOL(scsi_get_new_index); diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h deleted file mode 100644 index 277204633850..000000000000 --- a/drivers/target/target_core_mib.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef TARGET_CORE_MIB_H -#define TARGET_CORE_MIB_H - -typedef enum { - SCSI_INST_INDEX, - SCSI_DEVICE_INDEX, - SCSI_AUTH_INTR_INDEX, - SCSI_INDEX_TYPE_MAX -} scsi_index_t; - -struct scsi_index_table { - spinlock_t lock; - u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; -} ____cacheline_aligned; - -/* SCSI Port stats */ -struct scsi_port_stats { - u64 cmd_pdus; - u64 tx_data_octets; - u64 rx_data_octets; -} ____cacheline_aligned; - -extern int init_scsi_target_mib(void); -extern void remove_scsi_target_mib(void); -extern void init_scsi_index_table(void); -extern u32 scsi_get_new_index(scsi_index_t); - -#endif /*** TARGET_CORE_MIB_H ***/ diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 742d24609a9b..f2a08477a68c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk( */ bd = blkdev_get_by_path(se_dev->se_dev_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); - if (!(bd)) { - printk("pSCSI: blkdev_get_by_path() failed\n"); + if (IS_ERR(bd)) { + printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); return NULL; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 158cecbec718..4a109835e420 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -282,6 +282,9 @@ int core_tmr_lun_reset( atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); + } else { + if (atomic_read(&task->task_execute_queue) != 0) + transport_remove_task_from_execute_queue(task, dev); } __transport_stop_task_timer(task, &flags); @@ -301,6 +304,7 @@ int core_tmr_lun_reset( DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); + atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 
fe_count); @@ -310,6 +314,7 @@ int core_tmr_lun_reset( } DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); + atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index abfa81a57115..c26f67467623 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( spin_lock_init(&acl->device_list_lock); spin_lock_init(&acl->nacl_sess_lock); atomic_set(&acl->acl_pr_ref_count, 0); - atomic_set(&acl->mib_ref_count, 0); acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; @@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) cpu_relax(); } -void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) -{ - while (atomic_read(&nacl->mib_ref_count) != 0) - cpu_relax(); -} - void core_tpg_clear_object_luns(struct se_portal_group *tpg) { int i, ret; @@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl( spin_unlock_bh(&tpg->session_lock); core_tpg_wait_for_nacl_pr_ref(acl); - core_tpg_wait_for_mib_ref(acl); core_clear_initiator_node_from_tpg(acl, tpg); core_free_device_list_for_node(acl, tpg); @@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register); int core_tpg_deregister(struct se_portal_group *se_tpg) { + struct se_node_acl *nacl, *nacl_tmp; + printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" " for endpoint: %s Portal Tag %u\n", (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? @@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) cpu_relax(); + /* + * Release any remaining demo-mode generated se_node_acl that have + * not been released because of TFO->tpg_check_demo_mode_cache() == 1 + * in transport_deregister_session(). + */ + spin_lock_bh(&se_tpg->acl_node_lock); + list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, + acl_list) { + list_del(&nacl->acl_list); + se_tpg->num_node_acls--; + spin_unlock_bh(&se_tpg->acl_node_lock); + + core_tpg_wait_for_nacl_pr_ref(nacl); + core_free_device_list_for_node(nacl, se_tpg); + TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); + + spin_lock_bh(&se_tpg->acl_node_lock); + } + spin_unlock_bh(&se_tpg->acl_node_lock); if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) core_tpg_release_virtual_lun0(se_tpg); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 28b6292ff298..4bbf6c147f89 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -379,6 +379,40 @@ void release_se_global(void) se_global = NULL; } +/* SCSI statistics table index */ +static struct scsi_index_table scsi_index_table; + +/* + * Initialize the index table for allocating unique row indexes to various mib + * tables. 
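
The core_tpg_deregister() hunk above walks the remaining demo-mode ACLs while repeatedly dropping acl_node_lock, because the per-node release work may block and must not run under a spinlock. A generic sketch of that unlock-release-relock loop, with hypothetical names, assuming nothing else modifies the list at this point (as is the case at deregister time):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical node type; only the locking shape matters here. */
struct node {
    struct list_head list;
};

static void release_all(struct list_head *head, spinlock_t *lock)
{
    struct node *n, *tmp;

    spin_lock_bh(lock);
    list_for_each_entry_safe(n, tmp, head, list) {
        list_del(&n->list);
        spin_unlock_bh(lock);

        /* Blocking or heavyweight teardown happens outside the lock;
         * the real hunk waits for PR references and frees the device
         * list here.  kfree() stands in for that work. */
        kfree(n);

        spin_lock_bh(lock);
    }
    spin_unlock_bh(lock);
}
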
+ */ +void init_scsi_index_table(void) +{ + memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); + spin_lock_init(&scsi_index_table.lock); +} + +/* + * Allocate a new row index for the entry type specified + */ +u32 scsi_get_new_index(scsi_index_t type) +{ + u32 new_index; + + if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { + printk(KERN_ERR "Invalid index type %d\n", type); + return -EINVAL; + } + + spin_lock(&scsi_index_table.lock); + new_index = ++scsi_index_table.scsi_mib_index[type]; + if (new_index == 0) + new_index = ++scsi_index_table.scsi_mib_index[type]; + spin_unlock(&scsi_index_table.lock); + + return new_index; +} + void transport_init_queue_obj(struct se_queue_obj *qobj) { atomic_set(&qobj->queue_cnt, 0); @@ -437,7 +471,6 @@ struct se_session *transport_init_session(void) } INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); - atomic_set(&se_sess->mib_ref_count, 0); return se_sess; } @@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); return; } - /* - * Wait for possible reference in drivers/target/target_core_mib.c: - * scsi_att_intr_port_seq_show() - */ - while (atomic_read(&se_sess->mib_ref_count) != 0) - cpu_relax(); spin_lock_bh(&se_tpg->session_lock); list_del(&se_sess->sess_list); @@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess) spin_unlock_bh(&se_tpg->acl_node_lock); core_tpg_wait_for_nacl_pr_ref(se_nacl); - core_tpg_wait_for_mib_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, se_nacl); @@ -1181,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev) * * */ -static void transport_remove_task_from_execute_queue( +void transport_remove_task_from_execute_queue( struct se_task *task, struct se_device *dev) { @@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map( return ret; } + + BUG_ON(list_empty(se_mem_list)); /* * This is the normal path for all normal non BIDI and BIDI-COMMAND * WRITE payloads.. If we need to do BIDI READ passthrough for @@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) struct se_mem *se_mem = NULL, *se_mem_lout = NULL; u32 se_mem_cnt = 0, task_offset = 0; - BUG_ON(list_empty(cmd->t_task->t_mem_list)); + if (!list_empty(T_TASK(cmd)->t_mem_list)) + se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, + struct se_mem, se_list); ret = transport_do_se_mem_map(dev, task, cmd->t_task->t_mem_list, NULL, se_mem, @@ -5519,7 +5549,8 @@ static void transport_generic_wait_for_tasks( atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); } - if (!atomic_read(&T_TASK(cmd)->t_transport_active)) + if (!atomic_read(&T_TASK(cmd)->t_transport_active) || + atomic_read(&T_TASK(cmd)->t_transport_aborted)) goto remove; atomic_set(&T_TASK(cmd)->t_transport_stop, 1); @@ -5926,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev) atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); + } else { + if (atomic_read(&task->task_execute_queue) != 0) + transport_remove_task_from_execute_queue(task, dev); } __transport_stop_task_timer(task, &flags); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index f7a5dba3ca23..bf7c687519ef 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -4,7 +4,6 @@ menuconfig THERMAL tristate "Generic Thermal sysfs driver" - depends on NET help Generic Thermal Sysfs driver offers a generic mechanism for thermal management. 
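
The iblock_create_virtdevice() and pscsi_create_type_disk() hunks above change the blkdev_get_by_path() failure check from a NULL test to IS_ERR(), since that helper reports errors as ERR_PTR-encoded pointers rather than NULL. A minimal sketch of the calling convention; open_backing_bdev() and its parameters are illustrative only:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

/* Sketch only: "path" is a udev path string and "holder" the
 * exclusive-open cookie, as in the backstore code above. */
static int open_backing_bdev(const char *path, void *holder,
                             struct block_device **out)
{
    struct block_device *bd;

    bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
                            holder);
    if (IS_ERR(bd))     /* failure is ERR_PTR(-errno), never NULL */
        return PTR_ERR(bd);

    *out = bd;
    return 0;
}
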
Usually it's made up of one or more thermal diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 7d0e63c79280..713b7ea4a607 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock); static unsigned int thermal_event_seqnum; -static struct genl_family thermal_event_genl_family = { - .id = GENL_ID_GENERATE, - .name = THERMAL_GENL_FAMILY_NAME, - .version = THERMAL_GENL_VERSION, - .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, -}; - -static int genetlink_init(void); -static void genetlink_exit(void); - static int get_idr(struct idr *idr, struct mutex *lock, int *id) { int err; @@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) EXPORT_SYMBOL(thermal_zone_device_unregister); +#ifdef CONFIG_NET +static struct genl_family thermal_event_genl_family = { + .id = GENL_ID_GENERATE, + .name = THERMAL_GENL_FAMILY_NAME, + .version = THERMAL_GENL_VERSION, + .maxattr = THERMAL_GENL_ATTR_MAX, +}; + +static struct genl_multicast_group thermal_event_mcgrp = { + .name = THERMAL_GENL_MCAST_GROUP_NAME, +}; + int generate_netlink_event(u32 orig, enum events event) { struct sk_buff *skb; @@ -1301,6 +1299,15 @@ static int genetlink_init(void) return result; } +static void genetlink_exit(void) +{ + genl_unregister_family(&thermal_event_genl_family); +} +#else /* !CONFIG_NET */ +static inline int genetlink_init(void) { return 0; } +static inline void genetlink_exit(void) {} +#endif /* !CONFIG_NET */ + static int __init thermal_init(void) { int result = 0; @@ -1316,11 +1323,6 @@ static int __init thermal_init(void) return result; } -static void genetlink_exit(void) -{ - genl_unregister_family(&thermal_event_genl_family); -} - static void __exit thermal_exit(void) { class_unregister(&thermal_class); diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8d..7b951adac54b 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) s->rts = 0; sprintf(b, "max3100-%d", s->minor); - s->workqueue = create_freezeable_workqueue(b); + s->workqueue = create_freezable_workqueue(b); if (!s->workqueue) { dev_warn(&s->spi->dev, "cannot create workqueue\n"); return -EBUSY; diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf708..750b4f627315 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c @@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) struct max3107_port *s = container_of(port, struct max3107_port, port); /* Initialize work queue */ - s->workqueue = create_freezeable_workqueue("max3107"); + s->workqueue = create_freezable_workqueue("max3107"); if (!s->workqueue) { dev_err(&s->spi->dev, "Workqueue creation failed\n"); return -EBUSY; diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c index 93760b2ea172..1ef4df9bf7e4 100644 --- a/drivers/tty/serial/serial_cs.c +++ b/drivers/tty/serial/serial_cs.c @@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 
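
The thermal_sys.c hunk above makes the netlink event support conditional on CONFIG_NET, providing static inline no-op stubs when networking is not built in so that callers stay unchanged. The general shape of that compile-time stub pattern, with foo_* placeholders:

#ifdef CONFIG_NET
/* Real implementation, compiled only when the networking core exists. */
static int foo_netlink_init(void)
{
    /* register the genetlink family and multicast group here */
    return 0;
}

static void foo_netlink_exit(void)
{
    /* unregister the genetlink family here */
}
#else /* !CONFIG_NET */
/* No-op stubs keep every caller identical; the compiler drops them. */
static inline int foo_netlink_init(void) { return 0; }
static inline void foo_netlink_exit(void) {}
#endif /* !CONFIG_NET */
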
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d041c6826e43..0f299b7aad60 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, mutex_lock(&usb_address0_mutex); - if (!udev->config && oldspeed == USB_SPEED_SUPER) { - /* Don't reset USB 3.0 devices during an initial setup */ - usb_set_device_state(udev, USB_STATE_DEFAULT); - } else { - /* Reset the device; full speed may morph to high speed */ - /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ - retval = hub_port_reset(hub, port1, udev, delay); - if (retval < 0) /* error or disconnect */ - goto fail; - /* success, speed is known */ - } + /* Reset the device; full speed may morph to high speed */ + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ + retval = hub_port_reset(hub, port1, udev, delay); + if (retval < 0) /* error or disconnect */ + goto fail; + /* success, speed is known */ + retval = -ENODEV; if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 44c595432d6f..81ce6a8e1d94 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ + { USB_DEVICE(0x04e8, 0x6601), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 3c6e1a058745..5e1495097ec3 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) if (unlikely(!skb)) break; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, - req->actual); - page = NULL; - if (req->actual < req->length) { /* Last fragment */ + if (skb->len == 0) { /* First fragment */ skb->protocol = htons(ETH_P_PHONET); skb_reset_mac_header(skb); - pskb_pull(skb, 1); + /* Can't use pskb_pull() on page in IRQ */ + memcpy(skb_put(skb, 1), page_address(page), 1); + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb->len == 0, req->actual); + page = NULL; + + if (req->actual < req->length) { /* Last fragment */ skb->dev = dev; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index e8f4f36fdf0b..a6f21b891f68 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -29,6 +29,7 @@ #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/of_address.h> /** * ehci_xilinx_of_setup - Initialize the device for ehci_reset() diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 
fcbf4abbf381..0231814a97a5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci) } } -void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) +void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) { - void *addr; + struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num]; + void __iomem *addr; u32 temp; u64 temp_64; @@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, } } -void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) +static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { /* Fields are 32 bits wide, DMA addresses are in bytes */ int field_size = 32 / 8; @@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); } -void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, +static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1d0f45f0e7a6..a9534396e85b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, /***************** Streams structures manipulation *************************/ -void xhci_free_stream_ctx(struct xhci_hcd *xhci, +static void xhci_free_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) { @@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci, * The stream context array must be a power of 2, and can be as small as * 64 bytes or as large as 1MB. 
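
The xhci-dbg.c and xhci-mem.c hunks above add __iomem annotations to pointers that reference memory-mapped xHCI registers. Sparse uses the annotation to flag direct dereferences; such pointers should only be touched through the MMIO accessors. A minimal sketch with a made-up register block:

#include <linux/io.h>
#include <linux/types.h>

/* Made-up register block; mirrors how xhci keeps __iomem pointers. */
struct foo_regs {
    u32 status;
    u32 control;
};

static u32 foo_read_status(struct foo_regs __iomem *regs)
{
    /* readl()/writel() are the only valid way to touch __iomem memory;
     * a plain regs->status dereference would be reported by sparse. */
    return readl(&regs->status);
}
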
*/ -struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, +static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, dma_addr_t *dma, gfp_t mem_flags) { @@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val &= DBOFF_MASK; xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" " from cap regs base addr\n", val); - xhci->dba = (void *) xhci->cap_regs + val; + xhci->dba = (void __iomem *) xhci->cap_regs + val; xhci_dbg_regs(xhci); xhci_print_run_regs(xhci); /* Set ir_set to interrupt register set 0 */ - xhci->ir_set = (void *) xhci->run_regs->ir_set; + xhci->ir_set = &xhci->run_regs->ir_set[0]; /* * Event ring setup: Allocate a normal ring, but also setup @@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); /* * XXX: Might need to set the Interrupter Moderation Register to diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e8211c1ce5a..3289bf4832c9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(cur_td->start_seg, dev->eps[ep_index].stopped_trb, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg(xhci, "Finding endpoint context\n"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); @@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } trb = &state->new_deq_ptr->generic; if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && @@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) /* Scatter gather list entries may cross 64KB boundaries */ running_total = TRB_MAX_BUFF_SIZE - - (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; /* How many more 64KB chunks to transfer, how many more TRBs? 
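
In the xhci_find_new_dequeue_state() hunks above, an inconsistent ring state no longer calls BUG(); it logs a backtrace via WARN_ON() and returns, so only the affected transfer is lost rather than the whole machine. A small sketch of that defensive idiom, using a stand-in structure and an arbitrary error code:

#include <linux/bug.h>
#include <linux/errno.h>

/* Stand-in for the real xHCI dequeue state. */
struct ring_state {
    void *seg;
};

static int check_ring_state(struct ring_state *st)
{
    /*
     * WARN_ON() evaluates to the tested condition, so the caller can
     * abandon just this operation after logging a backtrace, instead
     * of halting the machine the way BUG() did.
     */
    if (WARN_ON(!st->seg))
        return -EINVAL;     /* arbitrary error code for the sketch */

    return 0;
}
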
*/ - while (running_total < sg_dma_len(sg)) { + while (running_total < sg_dma_len(sg) && running_total < temp) { num_trbs++; running_total += TRB_MAX_BUFF_SIZE; } @@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) static void check_trb_math(struct urb *urb, int num_trbs, int running_total) { if (num_trbs != 0) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " "TRBs, %d left\n", __func__, urb->ep->desc.bEndpointAddress, num_trbs); if (running_total != urb->transfer_buffer_length) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " "queued %#x (%d), asked for %#x (%d)\n", __func__, urb->ep->desc.bEndpointAddress, @@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, sg = urb->sg; addr = (u64) sg_dma_address(sg); this_sg_len = sg_dma_len(sg); - trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; @@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); if (TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { + (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), @@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (running_total + trb_buff_len > urb->transfer_buffer_length) trb_buff_len = @@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, num_trbs = 0; /* How much data is (potentially) left before the 64KB boundary? */ running_total = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; /* If there's some data on this 64KB chunk, or we have to send a * zero-length transfer, we need at least one TRB @@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* How much data is in the first TRB? 
*/ addr = (u64) urb->transfer_dma; trb_buff_len = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); - if (urb->transfer_buffer_length < trb_buff_len) + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; first_trb = true; @@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); td_len = urb->iso_frame_desc[i].length; - running_total = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 34cf4e165877..2083fc2179b2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci) /* * Set the run bit and wait for the host to be running. */ -int xhci_start(struct xhci_hcd *xhci) +static int xhci_start(struct xhci_hcd *xhci) { u32 temp; int ret; @@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd) #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -void xhci_event_ring_work(unsigned long arg) +static void xhci_event_ring_work(unsigned long arg) { unsigned long flags; int temp; @@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd) xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); xhci_writel(xhci, ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); if (NUM_TEST_NOOPS > 0) doorbell = xhci_setup_one_noop(xhci); @@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd) temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); xhci_writel(xhci, ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); xhci_writel(xhci, ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs) /* Returns 1 if the arguments are OK; * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 
*/ -int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, +static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, const char *func) { struct xhci_hcd *xhci; @@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); } -void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, +static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state) { diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7f236fd22015..7f127df6dd55 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) } /* xHCI debugging */ -void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); +void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num); void xhci_print_registers(struct xhci_hcd *xhci); void xhci_dbg_regs(struct xhci_hcd *xhci); void xhci_print_run_regs(struct xhci_hcd *xhci); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 54a8bd1047d6..c292d5c499e7 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev, INIT_LIST_HEAD(&musb->out_bulk); hcd->uses_new_polling = 1; + hcd->has_tt = 1; musb->vbuserr_retry = VBUSERR_RETRY_COUNT; musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index d74a8113ae74..e6400be8a0f8 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -488,6 +488,15 @@ struct musb { unsigned set_address:1; unsigned test_mode:1; unsigned softconnect:1; + + u8 address; + u8 test_mode_nr; + u16 ackpend; /* ep0 */ + enum musb_g_ep0_state ep0_state; + struct usb_gadget g; /* the gadget */ + struct usb_gadget_driver *gadget_driver; /* its driver */ +#endif + /* * FIXME: Remove this flag. * @@ -501,14 +510,6 @@ struct musb { */ unsigned double_buffer_not_ok:1 __deprecated; - u8 address; - u8 test_mode_nr; - u16 ackpend; /* ep0 */ - enum musb_g_ep0_state ep0_state; - struct usb_gadget g; /* the gadget */ - struct usb_gadget_driver *gadget_driver; /* its driver */ -#endif - struct musb_hdrc_config *config; #ifdef MUSB_CONFIG_PROC_FS diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index a3f12333fc41..bc8badd16897 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb) static int omap2430_musb_exit(struct musb *musb) { + del_timer_sync(&musb_idle_timer); omap2430_low_level_exit(musb); otg_put_transceiver(musb->xceiv); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 7481ff8a49e4..0457813eebee 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. 
Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ { } diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index b004b2a485c3..9c014e2ecd68 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb) __func__, status, endpoint); } else { tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { - tty_insert_flip_string(tty, data, urb->actual_length); - tty_flip_buffer_push(tty); - } else - dbg("%s: empty read urb received", __func__); - tty_kref_put(tty); + if (tty) { + if (urb->actual_length) { + tty_insert_flip_string(tty, data, + urb->actual_length); + tty_flip_buffer_push(tty); + } else + dbg("%s: empty read urb received", __func__); + tty_kref_put(tty); + } /* Resubmit urb so we continue receiving */ if (status != -ESHUTDOWN) { diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 15a5d89b7f39..1c11959a7d58 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -27,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> +#include <linux/usb/cdc.h> #include "visor.h" /* @@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, dbg("%s", __func__); + /* + * some Samsung Android phones in modem mode have the same ID + * as SPH-I500, but they are ACM devices, so dont bind to them + */ + if (id->idVendor == SAMSUNG_VENDOR_ID && + id->idProduct == SAMSUNG_SPH_I500_ID && + serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && + serial->dev->descriptor.bDeviceSubClass == + USB_CDC_SUBCLASS_ACM) + return -ENODEV; + if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 8010aaeb5adb..dd0e84a9bd2f 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c @@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) lcd->spi = spi; lcd->power = FB_BLANK_POWERDOWN; lcd->buffer = kzalloc(8, GFP_KERNEL); + if (!lcd->buffer) { + ret = -ENOMEM; + goto out_free_lcd; + } ld = lcd_device_register("ltv350qv", &spi->dev, lcd, <v_ops); if (IS_ERR(ld)) { ret = PTR_ERR(ld); - goto out_free_lcd; + goto out_free_buffer; } lcd->ld = ld; @@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) out_unregister: lcd_device_unregister(ld); +out_free_buffer: + kfree(lcd->buffer); out_free_lcd: kfree(lcd); return ret; @@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi) ltv350qv_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->ld); + kfree(lcd->buffer); kfree(lcd); return 0; diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index eca855a55c0d..3de4ba0260a5 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op) struct cpwd *p = dev_get_drvdata(&op->dev); int i; - for (i = 0; i < 4; i++) { + for (i = 0; i < WD_NUMDEVS; i++) { misc_deregister(&p->devs[i].misc); if (!p->enabled) { diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 24b966d5061a..204a5603c4ae 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) return 0; } -static void __devexit 
hpwdt_exit_nmi_decoding(void) +static void hpwdt_exit_nmi_decoding(void) { unregister_die_notifier(&die_notifier); if (cru_rom_addr) @@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) return 0; } -static void __devexit hpwdt_exit_nmi_decoding(void) +static void hpwdt_exit_nmi_decoding(void) { } #endif /* CONFIG_HPWDT_NMI_DECODING */ diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index c7d67e9a7465..79906255eeb6 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c @@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = { static int __init fitpc2_wdt_init(void) { int err; + const char *brd_name; - if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) + brd_name = dmi_get_system_info(DMI_BOARD_NAME); + + if (!brd_name || !strstr(brd_name, "SBC-FITPC2")) return -ENODEV; - pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); + pr_info("%s found\n", brd_name); if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 0461858e07d0..b61ab1c54293 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) sch311x_sio_outb(sio_config_port, 0x07, 0x0a); /* Check if Logical Device Register is currently active */ - if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0) + if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0) printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); /* Get the base address of the runtime registers */ diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index a6c12dec91a1..df2a64dc9672 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c @@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void) outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ outb_p(0x30, WDT_EFER); /* select CR30 */ c = inb_p(WDT_EFDR); - outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ + outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ return 0; } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 43f9f02c7db0..b1661cd416b0 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages) /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 74681478100a..89987a7bf26f 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) BUG_ON(irq == -1); #ifdef CONFIG_SMP - cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); + cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); #endif clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); @@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void) /* By default all event channels notify CPU#0. 
*/ for_each_irq_desc(i, desc) { - cpumask_copy(desc->affinity, cpumask_of(0)); + cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); } #endif @@ -376,81 +376,69 @@ static void unmask_evtchn(int port) put_cpu(); } -static int get_nr_hw_irqs(void) +static int xen_allocate_irq_dynamic(void) { - int ret = 1; + int first = 0; + int irq; #ifdef CONFIG_X86_IO_APIC - ret = get_nr_irqs_gsi(); + /* + * For an HVM guest or domain 0 which see "real" (emulated or + * actual repectively) GSIs we allocate dynamic IRQs + * e.g. those corresponding to event channels or MSIs + * etc. from the range above those "real" GSIs to avoid + * collisions. + */ + if (xen_initial_domain() || xen_hvm_domain()) + first = get_nr_irqs_gsi(); #endif - return ret; -} +retry: + irq = irq_alloc_desc_from(first, -1); -static int find_unbound_pirq(int type) -{ - int rc, i; - struct physdev_get_free_pirq op_get_free_pirq; - op_get_free_pirq.type = type; + if (irq == -ENOMEM && first > NR_IRQS_LEGACY) { + printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n"); + first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY); + goto retry; + } - rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); - if (!rc) - return op_get_free_pirq.pirq; + if (irq < 0) + panic("No available IRQ to bind to: increase nr_irqs!\n"); - for (i = 0; i < nr_irqs; i++) { - if (pirq_to_irq[i] < 0) - return i; - } - return -1; + return irq; } -static int find_unbound_irq(void) +static int xen_allocate_irq_gsi(unsigned gsi) { - struct irq_data *data; - int irq, res; - int bottom = get_nr_hw_irqs(); - int top = nr_irqs-1; - - if (bottom == nr_irqs) - goto no_irqs; + int irq; - /* This loop starts from the top of IRQ space and goes down. - * We need this b/c if we have a PCI device in a Xen PV guest - * we do not have an IO-APIC (though the backend might have them) - * mapped in. To not have a collision of physical IRQs with the Xen - * event channels start at the top of the IRQ space for virtual IRQs. + /* + * A PV guest has no concept of a GSI (since it has no ACPI + * nor access to/knowledge of the physical APICs). Therefore + * all IRQs are dynamically allocated from the entire IRQ + * space. */ - for (irq = top; irq > bottom; irq--) { - data = irq_get_irq_data(irq); - /* only 15->0 have init'd desc; handle irq > 16 */ - if (!data) - break; - if (data->chip == &no_irq_chip) - break; - if (data->chip != &xen_dynamic_chip) - continue; - if (irq_info[irq].type == IRQT_UNBOUND) - return irq; - } - - if (irq == bottom) - goto no_irqs; + if (xen_pv_domain() && !xen_initial_domain()) + return xen_allocate_irq_dynamic(); - res = irq_alloc_desc_at(irq, -1); + /* Legacy IRQ descriptors are already allocated by the arch. */ + if (gsi < NR_IRQS_LEGACY) + return gsi; - if (WARN_ON(res != irq)) - return -1; + irq = irq_alloc_desc_at(gsi, -1); + if (irq < 0) + panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq); return irq; - -no_irqs: - panic("No available IRQ to bind to: increase nr_irqs!\n"); } -static bool identity_mapped_irq(unsigned irq) +static void xen_free_irq(unsigned irq) { - /* identity map all the hardware irqs */ - return irq < get_nr_hw_irqs(); + /* Legacy IRQ descriptors are managed by the arch. 
*/ + if (irq < NR_IRQS_LEGACY) + return; + + irq_free_desc(irq); } static void pirq_unmask_notify(int irq) @@ -486,7 +474,7 @@ static bool probing_irq(int irq) return desc && desc->action == NULL; } -static unsigned int startup_pirq(unsigned int irq) +static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); @@ -524,9 +512,15 @@ out: return 0; } -static void shutdown_pirq(unsigned int irq) +static unsigned int startup_pirq(struct irq_data *data) +{ + return __startup_pirq(data->irq); +} + +static void shutdown_pirq(struct irq_data *data) { struct evtchn_close close; + unsigned int irq = data->irq; struct irq_info *info = info_for_irq(irq); int evtchn = evtchn_from_irq(irq); @@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq) info->evtchn = 0; } -static void enable_pirq(unsigned int irq) +static void enable_pirq(struct irq_data *data) { - startup_pirq(irq); + startup_pirq(data); } -static void disable_pirq(unsigned int irq) +static void disable_pirq(struct irq_data *data) { } -static void ack_pirq(unsigned int irq) +static void ack_pirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(irq); + int evtchn = evtchn_from_irq(data->irq); - move_native_irq(irq); + move_native_irq(data->irq); if (VALID_EVTCHN(evtchn)) { mask_evtchn(evtchn); @@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq) } } -static void end_pirq(unsigned int irq) -{ - int evtchn = evtchn_from_irq(irq); - struct irq_desc *desc = irq_to_desc(irq); - - if (WARN_ON(!desc)) - return; - - if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == - (IRQ_DISABLED|IRQ_PENDING)) { - shutdown_pirq(irq); - } else if (VALID_EVTCHN(evtchn)) { - unmask_evtchn(evtchn); - pirq_unmask_notify(irq); - } -} - static int find_irq_by_gsi(unsigned gsi) { int irq; @@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) goto out; /* XXX need refcount? */ } - /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore - * we are using the !xen_initial_domain() to drop in the function.*/ - if (identity_mapped_irq(gsi) || (!xen_initial_domain() && - xen_pv_domain())) { - irq = gsi; - irq_alloc_desc_at(irq, -1); - } else - irq = find_unbound_irq(); + irq = xen_allocate_irq_gsi(gsi); set_irq_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, name); @@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) * this in the priv domain. 
*/ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { - irq_free_desc(irq); + xen_free_irq(irq); irq = -ENOSPC; goto out; } @@ -677,12 +647,29 @@ out: #include <linux/msi.h> #include "../pci/msi.h" +static int find_unbound_pirq(int type) +{ + int rc, i; + struct physdev_get_free_pirq op_get_free_pirq; + op_get_free_pirq.type = type; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); + if (!rc) + return op_get_free_pirq.pirq; + + for (i = 0; i < nr_irqs; i++) { + if (pirq_to_irq[i] < 0) + return i; + } + return -1; +} + void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) { spin_lock(&irq_mapping_update_lock); if (alloc & XEN_ALLOC_IRQ) { - *irq = find_unbound_irq(); + *irq = xen_allocate_irq_dynamic(); if (*irq == -1) goto out; } @@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) spin_lock(&irq_mapping_update_lock); - irq = find_unbound_irq(); + irq = xen_allocate_irq_dynamic(); if (irq == -1) goto out; @@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) if (rc) { printk(KERN_WARNING "xen map irq failed %d\n", rc); - irq_free_desc(irq); + xen_free_irq(irq); irq = -1; goto out; @@ -779,11 +766,12 @@ int xen_destroy_irq(int irq) printk(KERN_WARNING "unmap irq failed %d\n", rc); goto out; } - pirq_to_irq[info->u.pirq.pirq] = -1; } + pirq_to_irq[info->u.pirq.pirq] = -1; + irq_info[irq] = mk_unbound_info(); - irq_free_desc(irq); + xen_free_irq(irq); out: spin_unlock(&irq_mapping_update_lock); @@ -814,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) irq = evtchn_to_irq[evtchn]; if (irq == -1) { - irq = find_unbound_irq(); + irq = xen_allocate_irq_dynamic(); set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, handle_fasteoi_irq, "event"); @@ -839,7 +827,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) irq = per_cpu(ipi_to_irq, cpu)[ipi]; if (irq == -1) { - irq = find_unbound_irq(); + irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; @@ -875,7 +863,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) irq = per_cpu(virq_to_irq, cpu)[virq]; if (irq == -1) { - irq = find_unbound_irq(); + irq = xen_allocate_irq_dynamic(); set_irq_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); @@ -934,7 +922,7 @@ static void unbind_from_irq(unsigned int irq) if (irq_info[irq].type != IRQT_UNBOUND) { irq_info[irq] = mk_unbound_info(); - irq_free_desc(irq); + xen_free_irq(irq); } spin_unlock(&irq_mapping_update_lock); @@ -990,7 +978,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, if (irq < 0) return irq; - irqflags |= IRQF_NO_SUSPEND; + irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); @@ -1234,11 +1222,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) return 0; } -static int set_affinity_irq(unsigned irq, const struct cpumask *dest) +static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, + bool force) { unsigned tcpu = cpumask_first(dest); - return rebind_irq_to_cpu(irq, tcpu); + return rebind_irq_to_cpu(data->irq, tcpu); } int resend_irq_on_evtchn(unsigned int irq) @@ -1257,35 +1246,35 @@ int resend_irq_on_evtchn(unsigned int irq) return 1; } -static void enable_dynirq(unsigned int irq) +static void enable_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(irq); + int evtchn = evtchn_from_irq(data->irq); if 
(VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } -static void disable_dynirq(unsigned int irq) +static void disable_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(irq); + int evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } -static void ack_dynirq(unsigned int irq) +static void ack_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(irq); + int evtchn = evtchn_from_irq(data->irq); - move_masked_irq(irq); + move_masked_irq(data->irq); if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } -static int retrigger_dynirq(unsigned int irq) +static int retrigger_dynirq(struct irq_data *data) { - int evtchn = evtchn_from_irq(irq); + int evtchn = evtchn_from_irq(data->irq); struct shared_info *sh = HYPERVISOR_shared_info; int ret = 0; @@ -1334,7 +1323,7 @@ static void restore_cpu_pirqs(void) printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); - startup_pirq(irq); + __startup_pirq(irq); } } @@ -1445,7 +1434,6 @@ void xen_poll_irq(int irq) void xen_irq_resume(void) { unsigned int cpu, irq, evtchn; - struct irq_desc *desc; init_evtchn_cpu_bindings(); @@ -1465,66 +1453,48 @@ void xen_irq_resume(void) restore_cpu_ipis(cpu); } - /* - * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These - * are not handled by the IRQ core. - */ - for_each_irq_desc(irq, desc) { - if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) - continue; - if (desc->status & IRQ_DISABLED) - continue; - - evtchn = evtchn_from_irq(irq); - if (evtchn == -1) - continue; - - unmask_evtchn(evtchn); - } - restore_cpu_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { - .name = "xen-dyn", + .name = "xen-dyn", - .disable = disable_dynirq, - .mask = disable_dynirq, - .unmask = enable_dynirq, + .irq_disable = disable_dynirq, + .irq_mask = disable_dynirq, + .irq_unmask = enable_dynirq, - .eoi = ack_dynirq, - .set_affinity = set_affinity_irq, - .retrigger = retrigger_dynirq, + .irq_eoi = ack_dynirq, + .irq_set_affinity = set_affinity_irq, + .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_pirq_chip __read_mostly = { - .name = "xen-pirq", + .name = "xen-pirq", - .startup = startup_pirq, - .shutdown = shutdown_pirq, + .irq_startup = startup_pirq, + .irq_shutdown = shutdown_pirq, - .enable = enable_pirq, - .unmask = enable_pirq, + .irq_enable = enable_pirq, + .irq_unmask = enable_pirq, - .disable = disable_pirq, - .mask = disable_pirq, + .irq_disable = disable_pirq, + .irq_mask = disable_pirq, - .ack = ack_pirq, - .end = end_pirq, + .irq_ack = ack_pirq, - .set_affinity = set_affinity_irq, + .irq_set_affinity = set_affinity_irq, - .retrigger = retrigger_dynirq, + .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_percpu_chip __read_mostly = { - .name = "xen-percpu", + .name = "xen-percpu", - .disable = disable_dynirq, - .mask = disable_dynirq, - .unmask = enable_dynirq, + .irq_disable = disable_dynirq, + .irq_mask = disable_dynirq, + .irq_unmask = enable_dynirq, - .ack = ack_dynirq, + .irq_ack = ack_dynirq, }; int xen_set_callback_via(uint64_t via) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index db8c4c4ac880..24177272bcb8 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; #ifdef CONFIG_PM_SLEEP static int xen_hvm_suspend(void *data) { + int err; struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; int *cancelled = data; BUG_ON(!irqs_disabled()); + err = sysdev_suspend(PMSG_SUSPEND); + if (err) { + 
printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", + err); + return err; + } + *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); xen_hvm_post_suspend(*cancelled); @@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data) xen_timer_resume(); } + sysdev_resume(); + return 0; } diff --git a/fs/afs/write.c b/fs/afs/write.c index 15690bb1d3b5..789b3afb3423 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, candidate->first = candidate->last = index; candidate->offset_first = from; candidate->to_last = to; + INIT_LIST_HEAD(&candidate->link); candidate->usage = 1; candidate->state = AFS_WBACK_PENDING; init_waitqueue_head(&candidate->waitq); @@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx) call_rcu(&ctx->rcu_head, ctx_rcu_free); } -#define get_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - atomic_inc(&(kioctx)->users); \ -} while (0) -#define put_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ - __put_ioctx(kioctx); \ -} while (0) +static inline void get_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + atomic_inc(&kioctx->users); +} + +static inline int try_get_ioctx(struct kioctx *kioctx) +{ + return atomic_inc_not_zero(&kioctx->users); +} + +static inline void put_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + if (unlikely(atomic_dec_and_test(&kioctx->users))) + __put_ioctx(kioctx); +} /* ioctx_alloc * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. @@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) rcu_read_lock(); hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { - if (ctx->user_id == ctx_id && !ctx->dead) { - get_ioctx(ctx); + /* + * RCU protects us against accessing freed memory but + * we have to be careful not to get a reference when the + * reference count already dropped to 0 (ctx->dead test + * is unreliable because of races). + */ + if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ ret = ctx; break; } @@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, goto out_put_req; spin_lock_irq(&ctx->ctx_lock); + /* + * We could have raced with io_destroy() and are currently holding a + * reference to ctx which should be destroyed. We cannot submit IO + * since ctx gets freed as soon as io_submit() puts its reference. The + * check here is reliable: io_destroy() sets ctx->dead before waiting + * for outstanding IO and the barrier between these two is realized by + * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we + * increment ctx->reqs_active before checking for ctx->dead and the + * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we + * don't see ctx->dead set here, io_destroy() waits for our IO to + * finish. 
+ */ + if (ctx->dead) { + spin_unlock_irq(&ctx->ctx_lock); + ret = -EINVAL; + goto out_put_req; + } aio_run_iocb(req); if (!list_empty(&ctx->run_list)) { /* drain the run list */ diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..889287019599 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); if (ret) goto out_del; + /* + * bdev could be deleted beneath us which would implicitly destroy + * the holder directory. Hold on to it. + */ + kobject_get(bdev->bd_part->holder_dir); list_add(&holder->list, &bdev->bd_holder_disks); goto out_unlock; @@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + kobject_put(bdev->bd_part->holder_dir); list_del_init(&holder->list); kfree(holder); } @@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); * flush_disk - invalidates all buffer-cache entries on a disk * * @bdev: struct block device to be flushed + * @kill_dirty: flag to guide handling of dirty inodes * * Invalidates all buffer-cache entries on a disk. It should be called * when a disk has been changed -- either by a media change or online * resize. */ -static void flush_disk(struct block_device *bdev) +static void flush_disk(struct block_device *bdev, bool kill_dirty) { - if (__invalidate_device(bdev)) { + if (__invalidate_device(bdev, kill_dirty)) { char name[BDEVNAME_SIZE] = ""; if (bdev->bd_disk) @@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) "%s: detected capacity change from %lld to %lld\n", name, bdev_size, disk_size); i_size_write(bdev->bd_inode, disk_size); - flush_disk(bdev); + flush_disk(bdev, false); } } EXPORT_SYMBOL(check_disk_size_change); @@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev) if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; - flush_disk(bdev); + flush_disk(bdev, true); if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); return 1; @@ -1215,12 +1222,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); - /* __blkdev_get() may alter read only status, check it afterwards */ - if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { - __blkdev_put(bdev, mode, 0); - res = -EACCES; - } - if (whole) { /* finish claiming */ mutex_lock(&bdev->bd_mutex); @@ -1298,6 +1299,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, if (err) return ERR_PTR(err); + if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { + blkdev_put(bdev, mode); + return ERR_PTR(-EACCES); + } + return bdev; } EXPORT_SYMBOL(blkdev_get_by_path); @@ -1601,7 +1607,7 @@ fail: } EXPORT_SYMBOL(lookup_bdev); -int __invalidate_device(struct block_device *bdev) +int __invalidate_device(struct block_device *bdev, bool kill_dirty) { struct super_block *sb = get_super(bdev); int res = 0; @@ -1614,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev) * hold). 
*/ shrink_dcache_sb(sb); - res = invalidate_inodes(sb); + res = invalidate_inodes(sb, kill_dirty); drop_super(sb); } invalidate_bdev(bdev); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c98b3af6052..7f78cc78fdd0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -729,6 +729,15 @@ struct btrfs_space_info { u64 disk_total; /* total bytes on disk, takes mirrors into account */ + /* + * we bump reservation progress every time we decrement + * bytes_reserved. This way people waiting for reservations + * know something good has happened and they can check + * for progress. The number here isn't to be trusted, it + * just shows reclaim activity + */ + unsigned long reservation_progress; + int full; /* indicates that we cannot allocate any more chunks for this space */ int force_alloc; /* set if we need to force a chunk alloc for @@ -1254,6 +1263,7 @@ struct btrfs_root { #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) +#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2218,6 +2228,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end); int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes); +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f3c96fc01439..7b3089b5c2df 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3342,15 +3342,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 max_reclaim; u64 reclaimed = 0; long time_left; - int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; int loops = 0; + unsigned long progress; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; smp_mb(); reserved = space_info->bytes_reserved; + progress = space_info->reservation_progress; if (reserved == 0) return 0; @@ -3365,31 +3366,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) { - loops = 0; + if (reserved > space_info->bytes_reserved) reclaimed += reserved - space_info->bytes_reserved; - } else { - loops++; - } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); + loops++; + if (reserved == 0 || reclaimed >= max_reclaim) break; if (trans && trans->transaction->blocked) return -EAGAIN; - __set_current_state(TASK_INTERRUPTIBLE); - time_left = schedule_timeout(pause); + time_left = schedule_timeout_interruptible(1); /* We were interrupted, exit */ if (time_left) break; - pause <<= 1; - if (pause > HZ / 10) - pause = HZ / 10; + /* we've kicked the IO a few times, if anything has been freed, + * exit. 
There is no sense in looping here for a long time + * when we really need to commit the transaction, or there are + * just too many writers without enough free space + */ + + if (loops > 3) { + smp_mb(); + if (progress != space_info->reservation_progress) + break; + } } return reclaimed >= to_reclaim; @@ -3612,6 +3618,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; + space_info->reservation_progress++; spin_unlock(&space_info->lock); } } @@ -3844,6 +3851,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) if (block_rsv->reserved >= block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; sinfo->bytes_reserved -= num_bytes; + sinfo->reservation_progress++; block_rsv->reserved = block_rsv->size; block_rsv->full = 1; } @@ -4005,7 +4013,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) to_reserve = 0; } spin_unlock(&BTRFS_I(inode)->accounting_lock); - to_reserve += calc_csum_metadata_size(inode, num_bytes); ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); if (ret) @@ -4133,6 +4140,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, btrfs_set_block_group_used(&cache->item, old_val); cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; + cache->space_info->reservation_progress++; cache->space_info->bytes_used += num_bytes; cache->space_info->disk_used += num_bytes * factor; spin_unlock(&cache->lock); @@ -4184,6 +4192,7 @@ static int pin_down_extent(struct btrfs_root *root, if (reserved) { cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; + cache->space_info->reservation_progress++; } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); @@ -4234,6 +4243,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache, space_info->bytes_readonly += num_bytes; cache->reserved -= num_bytes; space_info->bytes_reserved -= num_bytes; + space_info->reservation_progress++; } spin_unlock(&cache->lock); spin_unlock(&space_info->lock); @@ -4712,6 +4722,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, if (ret) { spin_lock(&cache->space_info->lock); cache->space_info->bytes_reserved -= buf->len; + cache->space_info->reservation_progress++; spin_unlock(&cache->space_info->lock); } goto out; @@ -5376,7 +5387,7 @@ again: num_bytes, data, 1); goto again; } - if (ret == -ENOSPC) { + if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); @@ -8065,6 +8076,13 @@ out: return ret; } +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type) +{ + u64 alloc_flags = get_alloc_profile(root, type); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); +} + /* * helper to account the unused space of all the readonly block group in the * list. takes mirrors into account. 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 92ac5192c518..714adc4ac4c2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits) + unsigned long bits, int contig) { struct rb_node *node; struct extent_state *state; u64 cur_start = *start; u64 total_bytes = 0; + u64 last = 0; int found = 0; if (search_end <= cur_start) { @@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, state = rb_entry(node, struct extent_state, rb_node); if (state->start > search_end) break; - if (state->end >= cur_start && (state->state & bits)) { + if (contig && found && state->start > last + 1) + break; + if (state->end >= cur_start && (state->state & bits) == bits) { total_bytes += min(search_end, state->end) + 1 - max(cur_start, state->start); if (total_bytes >= max_bytes) @@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, *start = state->start; found = 1; } + last = state->end; + } else if (contig && found) { + break; } node = rb_next(node); if (!node) @@ -2912,6 +2918,46 @@ out: return sector; } +/* + * helper function for fiemap, which doesn't want to see any holes. + * This maps until we find something past 'last' + */ +static struct extent_map *get_extent_skip_holes(struct inode *inode, + u64 offset, + u64 last, + get_extent_t *get_extent) +{ + u64 sectorsize = BTRFS_I(inode)->root->sectorsize; + struct extent_map *em; + u64 len; + + if (offset >= last) + return NULL; + + while(1) { + len = last - offset; + if (len == 0) + break; + len = (len + sectorsize - 1) & ~(sectorsize - 1); + em = get_extent(inode, NULL, 0, offset, len, 0); + if (!em || IS_ERR(em)) + return em; + + /* if this isn't a hole return it */ + if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && + em->block_start != EXTENT_MAP_HOLE) { + return em; + } + + /* this is a hole, advance to the next extent */ + offset = extent_map_end(em); + free_extent_map(em); + if (offset >= last) + break; + } + return NULL; +} + int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { @@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u32 flags = 0; u32 found_type; u64 last; + u64 last_for_get_extent = 0; u64 disko = 0; + u64 isize = i_size_read(inode); struct btrfs_key found_key; struct extent_map *em = NULL; struct extent_state *cached_state = NULL; struct btrfs_path *path; struct btrfs_file_extent_item *item; int end = 0; - u64 em_start = 0, em_len = 0; + u64 em_start = 0; + u64 em_len = 0; + u64 em_end = 0; unsigned long emflags; - int hole = 0; if (len == 0) return -EINVAL; @@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -ENOMEM; path->leave_spinning = 1; + /* + * lookup the last file extent. 
We're not using i_size here + * because there might be preallocation past i_size + */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, path, inode->i_ino, -1, 0); if (ret < 0) { @@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); - /* No extents, just return */ + /* No extents, but there might be delalloc bits */ if (found_key.objectid != inode->i_ino || found_type != BTRFS_EXTENT_DATA_KEY) { - btrfs_free_path(path); - return 0; + /* have to trust i_size as the end */ + last = (u64)-1; + last_for_get_extent = isize; + } else { + /* + * remember the start of the last extent. There are a + * bunch of different factors that go into the length of the + * extent, so its much less complex to remember where it started + */ + last = found_key.offset; + last_for_get_extent = last + 1; } - last = found_key.offset; btrfs_free_path(path); + /* + * we might have some extents allocated but more delalloc past those + * extents. so, we trust isize unless the start of the last extent is + * beyond isize + */ + if (last < isize) { + last = (u64)-1; + last_for_get_extent = isize; + } + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, off, max - off, 0); + + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); if (!em) goto out; if (IS_ERR(em)) { @@ -2973,22 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - hole = 0; - off = em->start + em->len; - if (off >= max) - end = 1; + u64 offset_in_extent; - if (em->block_start == EXTENT_MAP_HOLE) { - hole = 1; - goto next; - } + /* break if the extent we found is outside the range */ + if (em->start >= max || extent_map_end(em) < off) + break; - em_start = em->start; - em_len = em->len; + /* + * get_extent may return an extent that starts before our + * requested range. We have to make sure the ranges + * we return to fiemap always move forward and don't + * overlap, so adjust the offsets here + */ + em_start = max(em->start, off); + /* + * record the offset from the start of the extent + * for adjusting the disk offset below + */ + offset_in_extent = em_start - em->start; + em_end = extent_map_end(em); + em_len = em_end - em_start; + emflags = em->flags; disko = 0; flags = 0; + /* + * bump off for our next call to get_extent + */ + off = extent_map_end(em); + if (off >= max) + end = 1; + if (em->block_start == EXTENT_MAP_LAST_BYTE) { end = 1; flags |= FIEMAP_EXTENT_LAST; @@ -2999,42 +3088,34 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, flags |= (FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN); } else { - disko = em->block_start; + disko = em->block_start + offset_in_extent; } if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; -next: - emflags = em->flags; free_extent_map(em); em = NULL; - if (!end) { - em = get_extent(inode, NULL, 0, off, max - off, 0); - if (!em) - goto out; - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto out; - } - emflags = em->flags; - } - - if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { + if ((em_start >= last) || em_len == (u64)-1 || + (last == (u64)-1 && isize <= em_end)) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - if (em_start == last) { + /* now scan forward to see if this is really the last extent. 
*/ + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out; + } + if (!em) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - - if (!hole) { - ret = fiemap_fill_next_extent(fieinfo, em_start, disko, - em_len, flags); - if (ret) - goto out_free; - } + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); + if (ret) + goto out_free; } out_free: free_extent_map(em); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -191,7 +191,7 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits); + u64 max_bytes, unsigned long bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 7084140d5940..f447b783bb84 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, /* Flush processor's dcache for this page */ flush_dcache_page(page); + + /* + * if we get a partial write, we can end up with + * partially up to date pages. These add + * a lot of complexity, so make sure they don't + * happen by forcing this copy to be retried. + * + * The rest of the btrfs_file_write code will fall + * back to page at a time copies after we return 0. + */ + if (!PageUptodate(page) && copied < count) + copied = 0; + iov_iter_advance(i, copied); write_bytes -= copied; total_copied += copied; @@ -763,6 +776,27 @@ out: } /* + * on error we return an unlocked page and the error value + * on success we return a locked page and 0 + */ +static int prepare_uptodate_page(struct page *page, u64 pos) +{ + int ret = 0; + + if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { + ret = btrfs_readpage(NULL, page); + if (ret) + return ret; + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + return -EIO; + } + } + return 0; +} + +/* * this gets pages into the page cache and locks them down, it also properly * waits for data=ordered extents to finish before allowing the pages to be * modified. 
@@ -777,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, unsigned long index = pos >> PAGE_CACHE_SHIFT; struct inode *inode = fdentry(file)->d_inode; int err = 0; + int faili = 0; u64 start_pos; u64 last_pos; @@ -794,15 +829,24 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - int c; - for (c = i - 1; c >= 0; c--) { - unlock_page(pages[c]); - page_cache_release(pages[c]); - } - return -ENOMEM; + faili = i - 1; + err = -ENOMEM; + goto fail; + } + + if (i == 0) + err = prepare_uptodate_page(pages[i], pos); + if (i == num_pages - 1) + err = prepare_uptodate_page(pages[i], + pos + write_bytes); + if (err) { + page_cache_release(pages[i]); + faili = i - 1; + goto fail; } wait_on_page_writeback(pages[i]); } + err = 0; if (start_pos < inode->i_size) { struct btrfs_ordered_extent *ordered; lock_extent_bits(&BTRFS_I(inode)->io_tree, @@ -842,6 +886,14 @@ again: WARN_ON(!PageLocked(pages[i])); } return 0; +fail: + while (faili >= 0) { + unlock_page(pages[faili]); + page_cache_release(pages[faili]); + faili--; + } + return err; + } static ssize_t btrfs_file_aio_write(struct kiocb *iocb, @@ -851,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, struct file *file = iocb->ki_filp; struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; - struct page *pinned[2]; struct page **pages = NULL; struct iov_iter i; loff_t *ppos = &iocb->ki_pos; @@ -872,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || (file->f_flags & O_DIRECT)); - pinned[0] = NULL; - pinned[1] = NULL; - start_pos = pos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); @@ -962,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, first_index = pos >> PAGE_CACHE_SHIFT; last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; - /* - * there are lots of better ways to do this, but this code - * makes sure the first and last page in the file range are - * up to date and ready for cow - */ - if ((pos & (PAGE_CACHE_SIZE - 1))) { - pinned[0] = grab_cache_page(inode->i_mapping, first_index); - if (!PageUptodate(pinned[0])) { - ret = btrfs_readpage(NULL, pinned[0]); - BUG_ON(ret); - wait_on_page_locked(pinned[0]); - } else { - unlock_page(pinned[0]); - } - } - if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) { - pinned[1] = grab_cache_page(inode->i_mapping, last_index); - if (!PageUptodate(pinned[1])) { - ret = btrfs_readpage(NULL, pinned[1]); - BUG_ON(ret); - wait_on_page_locked(pinned[1]); - } else { - unlock_page(pinned[1]); - } - } - while (iov_iter_count(&i) > 0) { size_t offset = pos & (PAGE_CACHE_SIZE - 1); size_t write_bytes = min(iov_iter_count(&i), @@ -1024,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + + /* + * if we have trouble faulting in the pages, fall + * back to one page at a time + */ + if (copied < write_bytes) + nrptrs = 1; + + if (copied == 0) + dirty_pages = 0; + else + dirty_pages = (copied + offset + + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) @@ -1069,10 +1103,6 @@ out: err = ret; kfree(pages); - if (pinned[0]) - page_cache_release(pinned[0]); - if (pinned[1]) - page_cache_release(pinned[1]); *ppos = pos; /* diff --git a/fs/btrfs/inode.c 
b/fs/btrfs/inode.c index fb9bd7832b6d..9007bbd01dbf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) private = 0; if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, - (u64)-1, 1, EXTENT_DIRTY)) { + (u64)-1, 1, EXTENT_DIRTY, 0)) { ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, &private_failure); if (ret == 0) { @@ -4821,10 +4821,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; /* - * 1 item for inode ref + * 2 items for inode and inode ref * 2 items for dir items + * 1 item for parent inode */ - trans = btrfs_start_transaction(root, 3); + trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto fail; @@ -5280,6 +5281,128 @@ out: return em; } +struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) +{ + struct extent_map *em; + struct extent_map *hole_em = NULL; + u64 range_start = start; + u64 end; + u64 found; + u64 found_end; + int err = 0; + + em = btrfs_get_extent(inode, page, pg_offset, start, len, create); + if (IS_ERR(em)) + return em; + if (em) { + /* + * if our em maps to a hole, there might + * actually be delalloc bytes behind it + */ + if (em->block_start != EXTENT_MAP_HOLE) + return em; + else + hole_em = em; + } + + /* check to see if we've wrapped (len == -1 or similar) */ + end = start + len; + if (end < start) + end = (u64)-1; + else + end -= 1; + + em = NULL; + + /* ok, we didn't find anything, lets look for delalloc */ + found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + end, len, EXTENT_DELALLOC, 1); + found_end = range_start + found; + if (found_end < range_start) + found_end = (u64)-1; + + /* + * we didn't find anything useful, return + * the original results from get_extent() + */ + if (range_start > end || found_end <= start) { + em = hole_em; + hole_em = NULL; + goto out; + } + + /* adjust the range_start to make sure it doesn't + * go backwards from the start they passed in + */ + range_start = max(start,range_start); + found = found_end - range_start; + + if (found > 0) { + u64 hole_start = start; + u64 hole_len = len; + + em = alloc_extent_map(GFP_NOFS); + if (!em) { + err = -ENOMEM; + goto out; + } + /* + * when btrfs_get_extent can't find anything it + * returns one huge hole + * + * make sure what it found really fits our range, and + * adjust to make sure it is based on the start from + * the caller + */ + if (hole_em) { + u64 calc_end = extent_map_end(hole_em); + + if (calc_end <= start || (hole_em->start > end)) { + free_extent_map(hole_em); + hole_em = NULL; + } else { + hole_start = max(hole_em->start, start); + hole_len = calc_end - hole_start; + } + } + em->bdev = NULL; + if (hole_em && range_start > hole_start) { + /* our hole starts before our delalloc, so we + * have to return just the parts of the hole + * that go until the delalloc starts + */ + em->len = min(hole_len, + range_start - hole_start); + em->start = hole_start; + em->orig_start = hole_start; + /* + * don't adjust block start at all, + * it is fixed at EXTENT_MAP_HOLE + */ + em->block_start = hole_em->block_start; + em->block_len = hole_len; + } else { + em->start = range_start; + em->len = found; + em->orig_start = range_start; + em->block_start = EXTENT_MAP_DELALLOC; + em->block_len = found; + } + } else if (hole_em) { + return hole_em; + } +out: + + free_extent_map(hole_em); + if (err) { + 
free_extent_map(em); + return ERR_PTR(err); + } + return em; +} + static struct extent_map *btrfs_new_extent_direct(struct inode *inode, u64 start, u64 len) { @@ -5934,6 +6057,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, if (!skip_sum) { dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); if (!dip->csums) { + kfree(dip); ret = -ENOMEM; goto free_ordered; } @@ -6102,7 +6226,7 @@ out: static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { - return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); + return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); } int btrfs_readpage(struct file *file, struct page *page) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be2d4f6aaa5e..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; - if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + if (flags & BTRFS_SUBVOL_CREATE_ASYNC) return -EINVAL; if (flags & ~BTRFS_SUBVOL_RDONLY) return -EOPNOTSUPP; + if (!is_owner_or_cap(inode)) + return -EACCES; + down_write(&root->fs_info->subvol_sem); /* nothing to do */ @@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, goto out_reset; } - ret = btrfs_update_root(trans, root, + ret = btrfs_update_root(trans, root->fs_info->tree_root, &root->root_key, &root->root_item); btrfs_commit_transaction(trans, root); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long tot_out; unsigned long tot_len; char *buf; + bool may_late_unmap, need_unmap; data_in = kmap(pages_in[0]); tot_len = read_compress_length(data_in); @@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in += in_len; working_bytes = in_len; + may_late_unmap = need_unmap = false; /* fast path: avoid using the working buffer */ if (in_page_bytes_left >= in_len) { buf = data_in + in_offset; bytes = in_len; + may_late_unmap = true; goto cont; } @@ -329,14 +332,17 @@ cont: if (working_bytes == 0 && tot_in >= tot_len) break; - kunmap(pages_in[page_in_index]); - page_in_index++; - if (page_in_index >= total_pages_in) { + if (page_in_index + 1 >= total_pages_in) { ret = -1; - data_in = NULL; goto done; } - data_in = kmap(pages_in[page_in_index]); + + if (may_late_unmap) + need_unmap = true; + else + kunmap(pages_in[page_in_index]); + + data_in = kmap(pages_in[++page_in_index]); in_page_bytes_left = PAGE_CACHE_SIZE; in_offset = 0; @@ -346,6 +352,8 @@ cont: out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); + if (need_unmap) + kunmap(pages_in[page_in_index - 1]); if (ret != LZO_E_OK) { printk(KERN_WARNING "btrfs decompress failed\n"); ret = -1; @@ -363,8 +371,7 @@ cont: break; } done: - if (data_in) - kunmap(pages_in[page_in_index]); + kunmap(pages_in[page_in_index]); return ret; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0825e4ed9447..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) u32 item_size; int ret; int err = 0; + int progress = 0; path = btrfs_alloc_path(); if (!path) @@ -3666,9 +3667,10 @@ 
static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } while (1) { + progress++; trans = btrfs_start_transaction(rc->extent_root, 0); BUG_ON(IS_ERR(trans)); - +restart: if (update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); continue; @@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } } } + if (trans && progress && err == -ENOSPC) { + ret = btrfs_force_chunk_alloc(trans, rc->extent_root, + rc->block_group->flags); + if (ret == 0) { + err = 0; + progress = 0; + goto restart; + } + } btrfs_release_path(rc->extent_root, path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a004008f7d28..d39a9895d932 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -155,7 +155,8 @@ enum { Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, - Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, + Opt_enospc_debug, Opt_err, }; static match_table_t tokens = { @@ -184,6 +185,7 @@ static match_table_t tokens = { {Opt_space_cache, "space_cache"}, {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, + {Opt_enospc_debug, "enospc_debug"}, {Opt_err, NULL}, }; @@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_user_subvol_rm_allowed: btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); break; + case Opt_enospc_debug: + btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index af7dbca15276..dd13eb81ee40 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = btrfs_shrink_device(device, 0); if (ret) - goto error_brelse; + goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) - goto error_brelse; + goto error_undo; device->in_fs_metadata = 0; @@ -1416,6 +1416,13 @@ out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; +error_undo: + if (device->writeable) { + list_add(&device->dev_alloc_list, + &root->fs_info->fs_devices->alloc_list); + root->fs_info->fs_devices->rw_devices++; + } + goto error_brelse; } /* @@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; - device->mode = 0; + device->mode = FMODE_EXCL; set_blocksize(device->bdev, 4096); if (seeding_dev) { diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bc68de8edd7..ebafa65a29b6 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -409,7 +409,7 @@ more: spin_lock(&inode->i_lock); if (ci->i_release_count == fi->dir_release_count) { dout(" marking %p complete\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ ci->i_max_offset = filp->f_pos; } spin_unlock(&inode->i_lock); @@ -496,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, /* .snap dir? 
*/ if (err == -ENOENT && + ceph_snap(parent) == CEPH_NOSNAP && strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) { struct inode *inode = ceph_get_snapdir(parent); @@ -992,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *dir; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; dir = dentry->d_parent->d_inode; @@ -1029,28 +1030,8 @@ out_touch: static void ceph_dentry_release(struct dentry *dentry) { struct ceph_dentry_info *di = ceph_dentry(dentry); - struct inode *parent_inode = NULL; - u64 snapid = CEPH_NOSNAP; - if (!IS_ROOT(dentry)) { - parent_inode = dentry->d_parent->d_inode; - if (parent_inode) - snapid = ceph_snap(parent_inode); - } - dout("dentry_release %p parent %p\n", dentry, parent_inode); - if (parent_inode && snapid != CEPH_SNAPDIR) { - struct ceph_inode_info *ci = ceph_inode(parent_inode); - - spin_lock(&parent_inode->i_lock); - if (ci->i_shared_gen == di->lease_shared_gen || - snapid <= CEPH_MAXSNAP) { - dout(" clearing %p complete (d_release)\n", - parent_inode); - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; - ci->i_release_count++; - } - spin_unlock(&parent_inode->i_lock); - } + dout("dentry_release %p\n", dentry); if (di) { ceph_dentry_lru_del(dentry); if (di->lease_session) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 5625463aa479..193bfa5e9cbd 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode, (issued & CEPH_CAP_FILE_EXCL) == 0 && (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { dout(" marking %p complete (empty)\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ ci->i_max_offset = 2; } break; diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 39c243acd062..f40b9139e437 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) if (lastinode) iput(lastinode); - dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); - list_for_each_entry(child, &realm->children, child_item) - queue_realm_cap_snaps(child); + list_for_each_entry(child, &realm->children, child_item) { + dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n", + realm, realm->ino, child, child->ino); + list_del_init(&child->dirty_item); + list_add(&child->dirty_item, &realm->dirty_item); + } + list_del_init(&realm->dirty_item); dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); } @@ -683,7 +687,9 @@ more: * queue cap snaps _after_ we've built the new snap contexts, * so that i_head_snapc can be set appropriately. 
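In the ceph snap hunks above, queue_realm_cap_snaps() no longer recurses into realm->children; each child is unlinked and re-queued on a dirty list, and the caller (in the snap.c hunk that continues below) switches from list_for_each_entry() to a while (!list_empty()) / list_first_entry() drain, since realms are removed from the list as they are processed and a held cursor would go stale. A rough sketch of that drain pattern, using an invented minimal singly linked list rather than the kernel's list_head API (struct node and drain() exist only for this example):

struct node {
        struct node *next;
        /* payload would go here */
};

/*
 * Consume the whole list by always taking the current first element.
 * A cursor-style for-each walk would be unsafe here because process()
 * may unlink entries or queue new ones while the list is being drained.
 */
static void drain(struct node **head, void (*process)(struct node *n))
{
        while (*head) {
                struct node *n = *head;

                *head = n->next;        /* unlink first ... */
                n->next = NULL;
                process(n);             /* ... then do the work */
        }
}

With the kernel's doubly linked lists the same idea is spelled while (!list_empty(head)) { e = list_first_entry(head, ...); ... }, which is what the modified loop below does.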
*/ - list_for_each_entry(realm, &dirty_realms, dirty_item) { + while (!list_empty(&dirty_realms)) { + realm = list_first_entry(&dirty_realms, struct ceph_snap_realm, + dirty_item); queue_realm_cap_snaps(realm); } diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4a3330235d55..a9371b6578c0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.70" +#define CIFS_VERSION "1.71" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 8d9189f64477..79f641eeda30 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; - char *endp, scope_id[13]; + char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; @@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; - s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); - if (endp != scope_id + slen) - return 0; + rc = strict_strtoul(scope_id, 0, + (unsigned long *)&s6->sin6_scope_id); + rc = (rc == 0) ? 1 : 0; } return rc; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1adc9625a344..16765703131b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate: if (type == LANMAN) { #ifdef CONFIG_CIFS_WEAK_PW_HASH - char lnm_session_key[CIFS_SESS_KEY_SIZE]; + char lnm_session_key[CIFS_AUTH_RESP_SIZE]; pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* Calculate hash with password and copy into bcc_ptr. * Encryption Key (stored as in cryptkey) gets used if the @@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate: true : false, lnm_session_key); ses->flags |= CIFS_SES_LANMAN; - memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); - bcc_ptr += CIFS_SESS_KEY_SIZE; + memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; /* can not sign if LANMAN negotiated so no need to calculate signing key? 
but what if server diff --git a/fs/compat.c b/fs/compat.c index f6fd0a00e6cc..691c3fd8ce1d 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1228,7 +1228,9 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec, file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_readv(file, vec, vlen, &pos); + ret = -ESPIPE; + if (file->f_mode & FMODE_PREAD) + ret = compat_readv(file, vec, vlen, &pos); fput_light(file, fput_needed); return ret; } @@ -1285,7 +1287,9 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec, file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_writev(file, vec, vlen, &pos); + ret = -ESPIPE; + if (file->f_mode & FMODE_PWRITE) + ret = compat_writev(file, vec, vlen, &pos); fput_light(file, fput_needed); return ret; } diff --git a/fs/dcache.c b/fs/dcache.c index 2a6bd9a4ae97..611ffe928c03 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1523,6 +1523,28 @@ struct dentry * d_alloc_root(struct inode * root_inode) } EXPORT_SYMBOL(d_alloc_root); +static struct dentry * __d_find_any_alias(struct inode *inode) +{ + struct dentry *alias; + + if (list_empty(&inode->i_dentry)) + return NULL; + alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); + __dget(alias); + return alias; +} + +static struct dentry * d_find_any_alias(struct inode *inode) +{ + struct dentry *de; + + spin_lock(&inode->i_lock); + de = __d_find_any_alias(inode); + spin_unlock(&inode->i_lock); + return de; +} + + /** * d_obtain_alias - find or allocate a dentry for a given inode * @inode: inode to allocate the dentry for @@ -1552,7 +1574,7 @@ struct dentry *d_obtain_alias(struct inode *inode) if (IS_ERR(inode)) return ERR_CAST(inode); - res = d_find_alias(inode); + res = d_find_any_alias(inode); if (res) goto out_iput; @@ -1565,7 +1587,7 @@ struct dentry *d_obtain_alias(struct inode *inode) spin_lock(&inode->i_lock); - res = __d_find_alias(inode, 0); + res = __d_find_any_alias(inode); if (res) { spin_unlock(&inode->i_lock); dput(tmp); diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 6fc4f319b550..534c1d46e69e 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *lower_dentry; struct vfsmount *lower_mnt; - struct dentry *dentry_save; - struct vfsmount *vfsmount_save; + struct dentry *dentry_save = NULL; + struct vfsmount *vfsmount_save = NULL; int rc = 1; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) goto out; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + } rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + } if (dentry->d_inode) { struct inode *lower_inode = ecryptfs_inode_to_lower(dentry->d_inode); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dbc84ed96336..e00753496e3e 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -632,8 +632,7 @@ int 
ecryptfs_interpose(struct dentry *hidden_dentry, u32 flags); int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd); + struct inode *ecryptfs_dir_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, struct dentry *ecryptfs_dentry, diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 81e10e6a9443..7d1050e254f9 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, + .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bd33f87a1907..b592938a84bc 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, unsigned int flags_save; int rc; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - flags_save = nd->flags; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; - nd->flags &= ~LOOKUP_OPEN; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + flags_save = nd->flags; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + nd->flags &= ~LOOKUP_OPEN; + } rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; - nd->flags = flags_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + nd->flags = flags_save; + } return rc; } @@ -241,8 +245,7 @@ out: */ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd) + struct inode *ecryptfs_dir_inode) { struct dentry *lower_dir_dentry; struct vfsmount *lower_mnt; @@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, goto out; if (special_file(lower_inode->i_mode)) goto out; - if (!ecryptfs_nd) - goto out; /* Released in this function */ page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); if (!page_virt) { @@ -349,75 +350,6 @@ out: } /** - * ecryptfs_new_lower_dentry - * @name: The name of the new dentry. - * @lower_dir_dentry: Parent directory of the new dentry. - * @nd: nameidata from last lookup. - * - * Create a new dentry or get it from lower parent dir. - */ -static struct dentry * -ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry, - struct nameidata *nd) -{ - struct dentry *new_dentry; - struct dentry *tmp; - struct inode *lower_dir_inode; - - lower_dir_inode = lower_dir_dentry->d_inode; - - tmp = d_alloc(lower_dir_dentry, name); - if (!tmp) - return ERR_PTR(-ENOMEM); - - mutex_lock(&lower_dir_inode->i_mutex); - new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd); - mutex_unlock(&lower_dir_inode->i_mutex); - - if (!new_dentry) - new_dentry = tmp; - else - dput(tmp); - - return new_dentry; -} - - -/** - * ecryptfs_lookup_one_lower - * @ecryptfs_dentry: The eCryptfs dentry that we are looking up - * @lower_dir_dentry: lower parent directory - * @name: lower file name - * - * Get the lower dentry from vfs. If lower dentry does not exist yet, - * create it. 
- */ -static struct dentry * -ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, - struct dentry *lower_dir_dentry, struct qstr *name) -{ - struct nameidata nd; - struct vfsmount *lower_mnt; - int err; - - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( - ecryptfs_dentry->d_parent)); - err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); - mntput(lower_mnt); - - if (!err) { - /* we dont need the mount */ - mntput(nd.path.mnt); - return nd.path.dentry; - } - if (err != -ENOENT) - return ERR_PTR(err); - - /* create a new lower dentry */ - return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd); -} - -/** * ecryptfs_lookup * @ecryptfs_dir_inode: The eCryptfs directory inode * @ecryptfs_dentry: The eCryptfs dentry that we are looking up @@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, size_t encrypted_and_encoded_name_size; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; struct dentry *lower_dir_dentry, *lower_dentry; - struct qstr lower_name; int rc = 0; if ((ecryptfs_dentry->d_name.len == 1 @@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, goto out_d_drop; } lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); - lower_name.name = ecryptfs_dentry->d_name.name; - lower_name.len = ecryptfs_dentry->d_name.len; - lower_name.hash = ecryptfs_dentry->d_name.hash; - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, &lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, + lower_dir_dentry, + ecryptfs_dentry->d_name.len); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; @@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, "filename; rc = [%d]\n", __func__, rc); goto out_d_drop; } - lower_name.name = encrypted_and_encoded_name; - lower_name.len = encrypted_and_encoded_name_size; - lower_name.hash = full_name_hash(lower_name.name, lower_name.len); - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, &lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(encrypted_and_encoded_name, + lower_dir_dentry, + encrypted_and_encoded_name_size); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; } lookup_and_interpose: rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, - ecryptfs_dir_inode, - ecryptfs_nd); + ecryptfs_dir_inode); goto out; out_d_drop: d_drop(ecryptfs_dentry); @@ -1092,6 +1010,8 
@@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), ecryptfs_dentry_to_lower(dentry), &lower_stat); if (!rc) { + fsstack_copy_attr_all(dentry->d_inode, + ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } diff --git a/fs/eventfd.c b/fs/eventfd.c index e0194b3e14d6..d9a591773919 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get); * @ctx: [in] Pointer to eventfd context. * * The eventfd context reference must have been previously acquired either - * with eventfd_ctx_get() or eventfd_ctx_fdget()). + * with eventfd_ctx_get() or eventfd_ctx_fdget(). */ void eventfd_ctx_put(struct eventfd_ctx *ctx) { @@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. * @ctx: [in] Pointer to eventfd context. * @wait: [in] Wait queue to be removed. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked. * @@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. * @ctx: [in] Pointer to eventfd context. * @no_wait: [in] Different from zero if the operation should not block. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * - * -EAGAIN : The operation would have blocked but @no_wait was nonzero. + * -EAGAIN : The operation would have blocked but @no_wait was non-zero. * -ERESTARTSYS : A signal interrupted the wait operation. * * If @no_wait is zero, the function might sleep until the eventfd internal diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 267d0ada4541..4a09af9e9a63 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -63,6 +63,13 @@ * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). + * It is also acquired when inserting an epoll fd onto another epoll + * fd. We do this so that we walk the epoll tree and ensure that this + * insertion does not create a cycle of epoll file descriptors, which + * could lead to deadlock. We need a global mutex to prevent two + * simultaneous inserts (A into B and B into A) from racing and + * constructing a cycle without either insert observing that it is + * going to. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. 
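The eventpoll.c hunk above extends the comment on "epmutex" to cover inserting one epoll file descriptor into another, and the ep_loop_check()/ep_loop_check_proc() helpers added further down perform the actual walk. As a rough, self-contained illustration of that kind of check (this is not the kernel's ep_call_nested() machinery; struct toy_ep, toy_loop_check() and TOY_MAX_NESTS are invented for the example), the idea is to walk everything reachable from the file being inserted and refuse if the walk gets back to the target epoll or nests too deeply:

#include <stdio.h>

/* Toy stand-in for an epoll instance that may watch other epoll instances. */
struct toy_ep {
        struct toy_ep **children;
        int nchildren;
};

#define TOY_MAX_NESTS 4

/*
 * Return -1 if linking 'child' under 'origin' would close a loop back to
 * 'origin' or build a chain deeper than TOY_MAX_NESTS, else return 0.
 */
static int toy_loop_check(const struct toy_ep *origin,
                          const struct toy_ep *child, int depth)
{
        int i;

        if (child == origin)
                return -1;              /* the insert would create a cycle */
        if (depth >= TOY_MAX_NESTS)
                return -1;              /* chain too deep */
        for (i = 0; i < child->nchildren; i++)
                if (toy_loop_check(origin, child->children[i], depth + 1))
                        return -1;
        return 0;
}

int main(void)
{
        struct toy_ep a = { NULL, 0 }, b = { NULL, 0 };
        struct toy_ep *b_children[] = { &a };

        b.children = b_children;
        b.nchildren = 1;                /* b already watches a */

        /* Adding b to a would form the loop a -> b -> a. */
        printf("%s\n", toy_loop_check(&a, &b, 0) ? "refused: loop" : "ok");
        return 0;
}

The reason for holding the global mutex across both the check and the insert, as the new comment in epoll_ctl() below explains, is that two concurrent inserts (A into B and B into A) could otherwise each pass the check and then both link, creating exactly the cycle the check is meant to prevent.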
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +/* Used to check for epoll file descriptor inclusion loops */ +static struct nested_calls poll_loop_ncalls; + /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; @@ -1198,6 +1208,62 @@ retry: return res; } +/** + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() + * API, to verify that adding an epoll file inside another + * epoll structure, does not violate the constraints, in + * terms of closed loops, or too deep chains (which can + * result in excessive stack usage). + * + * @priv: Pointer to the epoll file to be currently checked. + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll + * data structure pointer. + * @call_nests: Current dept of the @ep_call_nested() call stack. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) +{ + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock(&ep->mtx); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, + epi->ffd.file->private_data, current); + if (error != 0) + break; + } + } + mutex_unlock(&ep->mtx); + + return error; +} + +/** + * ep_loop_check - Performs a check to verify that adding an epoll file (@file) + * another epoll file (represented by @ep) does not create + * closed loops or too deep chains. + * + * @ep: Pointer to the epoll private data structure. + * @file: Pointer to the epoll file to be checked. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check(struct eventpoll *ep, struct file *file) +{ + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +} + /* * Open an eventpoll file descriptor. */ @@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; + int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; @@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, */ ep = file->private_data; + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are + * better be handled here, than in more critical paths. + * + * We hold epmutex across the loop check and the insert in this case, in + * order to prevent two separate inserts from racing and each doing the + * insert "at the same time" such that ep_loop_check passes on both + * before either one does the insert, thereby creating a cycle. 
+ */ + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; + error = -ELOOP; + if (ep_loop_check(ep, tfile) != 0) + goto error_tgt_fput; + } + + mutex_lock(&ep->mtx); /* @@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: + if (unlikely(did_lock_epmutex)) + mutex_unlock(&epmutex); + fput(tfile); error_fput: fput(file); @@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void) EP_ITEM_COST; BUG_ON(max_user_watches < 0); + /* + * Initialize the structure used to perform epoll file descriptor + * inclusion loops checks. + */ + ep_nested_calls_init(&poll_loop_ncalls); + /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 264e95d02830..4d70db110cfc 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = exofs_find_entry(new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); err = exofs_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; if (dir_de) @@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= EXOFS_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = exofs_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode->i_ctime = CURRENT_TIME; exofs_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2e1d8341d827..adb91855ccd0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ext2_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. 
*/ old_inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(old_inode); ext2_delete_entry (old_de, old_page); - inode_dec_link_count(old_inode); if (dir_de) { if (old_dir != new_dir) diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index f88f752babd9..adae3fb7451a 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry) static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* This is not negative dentry. Always valid. */ @@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) { - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bfed8447ed80..8bd0ef9286c3 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) { struct inode *inode; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; inode = entry->d_inode; @@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, if (err) return err; - if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) - return 0; + if (attr->ia_valid & ATTR_OPEN) { + if (fc->atomic_o_trunc) + return 0; + file = NULL; + } if (attr->ia_valid & ATTR_SIZE) is_truncate = true; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 95da1bc1c826..9e0832dbb1e3 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) return ff; } +static void fuse_release_async(struct work_struct *work) +{ + struct fuse_req *req; + struct fuse_conn *fc; + struct path path; + + req = container_of(work, struct fuse_req, misc.release.work); + path = req->misc.release.path; + fc = get_fuse_conn(path.dentry->d_inode); + + fuse_put_request(fc, req); + path_put(&path); +} + static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + if (fc->destroy_req) { + /* + * If this is a fuseblk mount, then it's possible that + * releasing the path will result in releasing the + * super block and sending the DESTROY request. If + * the server is single threaded, this would hang. + * For this reason do the path_put() in a separate + * thread. + */ + atomic_inc(&req->count); + INIT_WORK(&req->misc.release.work, fuse_release_async); + schedule_work(&req->misc.release.work); + } else { + path_put(&req->misc.release.path); + } } -static void fuse_file_put(struct fuse_file *ff) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + if (sync) { + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); + fuse_put_request(ff->fc, req); + } else { + req->end = fuse_release_end; + fuse_request_send_background(ff->fc, req); + } kfree(ff); } } @@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. 
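The fuse_release_end()/fuse_file_put() changes above make the drop of the last file reference either send RELEASE synchronously or hand the final path_put() off to a workqueue: as the new comment explains, on a fuseblk mount releasing the path inline can release the super block and send a DESTROY request, which can hang a single-threaded server. A generic userspace picture of that "the last put may have to defer teardown" pattern (illustrative only; struct obj, obj_put() and the pthread-based deferral are invented here, while the kernel code uses struct work_struct and schedule_work()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        /* resources whose release might block or re-enter the releaser */
};

static void obj_destroy(struct obj *o)
{
        /* stands in for the potentially blocking final cleanup */
        printf("destroying %p\n", (void *)o);
        free(o);
}

static void *obj_destroy_async(void *arg)
{
        obj_destroy(arg);
        return NULL;
}

/* Drop one reference; on the last one, tear down inline or via a helper. */
static void obj_put(struct obj *o, int sync)
{
        pthread_t t;

        if (atomic_fetch_sub(&o->refs, 1) != 1)
                return;                 /* not the last reference */
        if (sync || pthread_create(&t, NULL, obj_destroy_async, o) != 0)
                obj_destroy(o);         /* inline teardown (or fallback) */
        else
                pthread_detach(t);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refs, 2);
        obj_put(o, 1);                  /* one reference still held */
        obj_put(o, 0);                  /* last put: teardown is deferred */
        pthread_exit(NULL);             /* let the helper thread finish */
}

Build with -pthread. The only point being made is that when sync is false, the thread doing the last put never blocks inside the teardown path itself.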
+ * + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. */ - fuse_file_put(ff); + fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) @@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) page_cache_release(page); } if (req->ff) - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_send_readpages(struct fuse_req *req, struct file *file) @@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) { __free_page(req->pages[0]); - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ae5744a2f9e9..d4286947bc2c 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,7 @@ #include <linux/rwsem.h> #include <linux/rbtree.h> #include <linux/poll.h> +#include <linux/workqueue.h> /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -262,7 +263,10 @@ struct fuse_req { /** Data for asynchronous requests */ union { struct { - struct fuse_release_in in; + union { + struct fuse_release_in in; + struct work_struct work; + }; struct path path; } release; struct fuse_init_in init_in; diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 4a456338b873..0da8da2c991d 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c @@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) int error; int had_lock = 0; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; parent = dget_parent(dentry); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e6..7cd9a5a68d59 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) #endif glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + WQ_HIGHPRI | WQ_FREEZABLE, 0); if (IS_ERR(glock_workqueue)) return PTR_ERR(glock_workqueue); gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (IS_ERR(gfs2_delete_workqueue)) { destroy_workqueue(glock_workqueue); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17e..72c31a315d96 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo) struct address_space *mapping = (struct address_space *)(gl + 1); gfs2_init_glock_once(gl); - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - spin_lock_init(&mapping->i_mmap_lock); - INIT_LIST_HEAD(&mapping->private_list); - spin_lock_init(&mapping->private_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + address_space_init_once(mapping); } /** @@ -144,7 +137,7 @@ static int __init init_gfs2_fs(void) error = -ENOMEM; gfs_recovery_wq = alloc_workqueue("gfs_recovery", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (!gfs_recovery_wq) goto fail_wq; diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index afa66aaa2237..b4d70b13be92 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -238,46 
+238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) } /* - * hfs_unlink() + * hfs_remove() * - * This is the unlink() entry in the inode_operations structure for - * regular HFS directories. The purpose is to delete an existing - * file, given the inode for the parent directory and the name - * (and its length) of the existing file. - */ -static int hfs_unlink(struct inode *dir, struct dentry *dentry) -{ - struct inode *inode; - int res; - - inode = dentry->d_inode; - res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); - if (res) - return res; - - drop_nlink(inode); - hfs_delete_inode(inode); - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - - return res; -} - -/* - * hfs_rmdir() + * This serves as both unlink() and rmdir() in the inode_operations + * structure for regular HFS directories. The purpose is to delete + * an existing child, given the inode for the parent directory and + * the name (and its length) of the existing directory. * - * This is the rmdir() entry in the inode_operations structure for - * regular HFS directories. The purpose is to delete an existing - * directory, given the inode for the parent directory and the name - * (and its length) of the existing directory. + * HFS does not have hardlinks, so both rmdir and unlink set the + * link count to 0. The only difference is the emptiness check. */ -static int hfs_rmdir(struct inode *dir, struct dentry *dentry) +static int hfs_remove(struct inode *dir, struct dentry *dentry) { - struct inode *inode; + struct inode *inode = dentry->d_inode; int res; - inode = dentry->d_inode; - if (inode->i_size != 2) + if (S_ISDIR(inode->i_mode) && inode->i_size != 2) return -ENOTEMPTY; res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); if (res) @@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, /* Unlink destination if it already exists */ if (new_dentry->d_inode) { - res = hfs_unlink(new_dir, new_dentry); + res = hfs_remove(new_dir, new_dentry); if (res) return res; } @@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = { const struct inode_operations hfs_dir_inode_operations = { .create = hfs_create, .lookup = hfs_lookup, - .unlink = hfs_unlink, + .unlink = hfs_remove, .mkdir = hfs_mkdir, - .rmdir = hfs_rmdir, + .rmdir = hfs_remove, .rename = hfs_rename, .setattr = hfs_inode_setattr, }; diff --git a/fs/inode.c b/fs/inode.c index da85e56378f3..0647d80accf6 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +void address_space_init_once(struct address_space *mapping) +{ + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + spin_lock_init(&mapping->tree_lock); + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + mutex_init(&mapping->unmap_mutex); +} +EXPORT_SYMBOL(address_space_init_once); + /* * These are initializations that only need to be done * once, because the fields are idempotent across use @@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode) INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); - INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - spin_lock_init(&inode->i_data.tree_lock); - spin_lock_init(&inode->i_data.i_mmap_lock); - 
INIT_LIST_HEAD(&inode->i_data.private_list); - spin_lock_init(&inode->i_data.private_lock); - INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); - INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); + address_space_init_once(&inode->i_data); i_size_ordered_init(inode); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&inode->i_fsnotify_marks); @@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb) /** * invalidate_inodes - attempt to free all inodes on a superblock * @sb: superblock to operate on + * @kill_dirty: flag to guide handling of dirty inodes * * Attempts to free all inodes for a given superblock. If there were any * busy inodes return a non-zero value, else zero. + * If @kill_dirty is set, discard dirty inodes too, otherwise treat + * them as busy. */ -int invalidate_inodes(struct super_block *sb) +int invalidate_inodes(struct super_block *sb, bool kill_dirty) { int busy = 0; struct inode *inode, *next; @@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb) list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) continue; + if (inode->i_state & I_DIRTY && !kill_dirty) { + busy = 1; + continue; + } if (atomic_read(&inode->i_count)) { busy = 1; continue; diff --git a/fs/internal.h b/fs/internal.h index 0663568b1247..9b976b57d7fe 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); */ extern int get_nr_dirty_inodes(void); extern void evict_inodes(struct super_block *); -extern int invalidate_inodes(struct super_block *); +extern int invalidate_inodes(struct super_block *, bool); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 81ead850ddb6..5a2b269428a6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1600,7 +1600,7 @@ out: static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* * This is not negative dentry. Always valid. diff --git a/fs/minix/namei.c b/fs/minix/namei.c index ce7337ddfdbf..6e6777f1b4b2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); err = minix_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } minix_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); diff --git a/fs/namei.c b/fs/namei.c index 9e701e28a329..a4689eb2df28 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -795,7 +795,7 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) * Without that kind of total limit, nasty chains of consecutive * symlinks can cause almost arbitrarily long lookups. 
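The namei.c hunk that begins above and continues below passes the already looked-up inode into do_follow_link(); the comment right above is the interesting part: without a total limit, a chain of symlinks can make a single lookup almost arbitrarily long, so the walker caps both the nesting depth and the total number of links followed. A minimal userspace illustration of such a cap (this is not the VFS walker; TOY_MAX_LINKS and toy_resolve() are invented, relative link targets are not re-anchored to their directory, and real applications would simply use realpath(3)):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define TOY_MAX_LINKS 8                 /* arbitrary cap for the example */

/* Follow at most TOY_MAX_LINKS symlinks; return 0, -ELOOP or -errno. */
static int toy_resolve(const char *path, char *out, size_t outlen)
{
        char cur[PATH_MAX], tgt[PATH_MAX];
        int links = 0;

        snprintf(cur, sizeof(cur), "%s", path);
        for (;;) {
                struct stat st;
                ssize_t n;

                if (lstat(cur, &st) != 0)
                        return -errno;
                if (!S_ISLNK(st.st_mode))
                        break;
                if (++links > TOY_MAX_LINKS)
                        return -ELOOP;  /* nasty chain of symlinks: give up */
                n = readlink(cur, tgt, sizeof(tgt) - 1);
                if (n < 0)
                        return -errno;
                tgt[n] = '\0';
                snprintf(cur, sizeof(cur), "%s", tgt);
        }
        snprintf(out, outlen, "%s", cur);
        return 0;
}

int main(int argc, char **argv)
{
        char buf[PATH_MAX];
        int err = toy_resolve(argc > 1 ? argv[1] : "/proc/self/exe",
                              buf, sizeof(buf));

        if (err) {
                fprintf(stderr, "resolve: %s\n", strerror(-err));
                return 1;
        }
        printf("%s\n", buf);
        return 0;
}

In the kernel the per-walk nesting depth (MAX_NESTED_LINKS, checked in do_follow_link() below) and the total link count are tracked separately; the total count is the "total limit" the comment refers to.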
*/ -static inline int do_follow_link(struct path *path, struct nameidata *nd) +static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd) { void *cookie; int err = -ELOOP; @@ -803,6 +803,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) /* We drop rcu-walk here */ if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) return -ECHILD; + BUG_ON(inode != path->dentry->d_inode); if (current->link_count >= MAX_NESTED_LINKS) goto loop; @@ -1413,8 +1414,7 @@ exec_again: goto out_dput; if (inode->i_op->follow_link) { - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; @@ -1458,8 +1458,7 @@ last_component: break; if (inode && unlikely(inode->i_op->follow_link) && (lookup_flags & LOOKUP_FOLLOW)) { - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; @@ -1547,6 +1546,7 @@ static int path_walk(const char *name, struct nameidata *nd) /* nd->path had been dropped */ current->total_link_count = 0; nd->path = save; + nd->inode = save.dentry->d_inode; path_get(&nd->path); nd->flags |= LOOKUP_REVAL; result = link_path_walk(name, nd); @@ -2456,22 +2456,29 @@ struct file *do_filp_open(int dfd, const char *pathname, /* !O_CREAT, simple open */ error = do_path_lookup(dfd, pathname, flags, &nd); if (unlikely(error)) - goto out_filp; + goto out_filp2; error = -ELOOP; if (!(nd.flags & LOOKUP_FOLLOW)) { if (nd.inode->i_op->follow_link) - goto out_path; + goto out_path2; } error = -ENOTDIR; if (nd.flags & LOOKUP_DIRECTORY) { if (!nd.inode->i_op->lookup) - goto out_path; + goto out_path2; } audit_inode(pathname, nd.path.dentry); filp = finish_open(&nd, open_flag, acc_mode); +out2: release_open_intent(&nd); return filp; +out_path2: + path_put(&nd.path); +out_filp2: + filp = ERR_PTR(error); + goto out2; + creat: /* OK, have to create the file. Find the parent. 
*/ error = path_init_rcu(dfd, pathname, diff --git a/fs/namespace.c b/fs/namespace.c index 7b0b95371696..d1edf26025dc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags) */ br_write_lock(vfsmount_lock); if (mnt_get_count(mnt) != 2) { - br_write_lock(vfsmount_lock); + br_write_unlock(vfsmount_lock); return -EBUSY; } br_write_unlock(vfsmount_lock); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1cc600e77bb4..2f8e61816d75 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -37,6 +37,7 @@ #include <linux/inet.h> #include <linux/nfs_xdr.h> #include <linux/slab.h> +#include <linux/compat.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word) */ u64 nfs_compat_user_ino64(u64 fileid) { - int ino; +#ifdef CONFIG_COMPAT + compat_ulong_t ino; +#else + unsigned long ino; +#endif if (enable_ino64) return fileid; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 7a7474073148..1be36cf65bfc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp); #if defined(CONFIG_NFS_V4_1) struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); +extern void nfs4_schedule_session_recovery(struct nfs4_session *); +#else +static inline void nfs4_schedule_session_recovery(struct nfs4_session *session) +{ +} #endif /* CONFIG_NFS_V4_1 */ extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); @@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); -extern void nfs4_schedule_state_recovery(struct nfs_client *); +extern void nfs4_schedule_lease_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); -extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); -extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state); +extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs41_handle_recall_slot(struct nfs_client *clp); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index f5c9b125e8cc..b73c34375f60 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) goto out_err; } buf = kmalloc(rlen + 1, GFP_KERNEL); + if (!buf) { + dprintk("%s: Not enough memory\n", __func__); + goto out_err; + } buf[rlen] = '\0'; memcpy(buf, r_addr, rlen); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 78936a8f40ab..0a07e353a961 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -256,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, case -NFS4ERR_OPENMODE: if (state == NULL) break; - nfs4_state_mark_reclaim_nograce(clp, state); - goto do_state_recovery; + nfs4_schedule_stateid_recovery(server, state); + goto wait_on_recovery; case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: - goto 
do_state_recovery; + nfs4_schedule_lease_recovery(clp); + goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -272,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR: %d Reset session\n", __func__, errorcode); - nfs4_schedule_state_recovery(clp); + nfs4_schedule_session_recovery(clp->cl_session); exception->retry = 1; break; #endif /* defined(CONFIG_NFS_V4_1) */ @@ -295,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, } /* We failed to handle the error */ return nfs4_map_errors(ret); -do_state_recovery: - nfs4_schedule_state_recovery(clp); +wait_on_recovery: ret = nfs4_wait_clnt_recover(clp); if (ret == 0) exception->retry = 1; @@ -435,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * clp = res->sr_session->clp; do_renew_lease(clp, timestamp); /* Check sequence flags */ - if (atomic_read(&clp->cl_count) > 1) - nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); + if (res->sr_status_flags != 0) + nfs4_schedule_lease_recovery(clp); break; case -NFS4ERR_DELAY: /* The server detected a resend of the RPC call and @@ -1255,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: - nfs4_schedule_state_recovery( - server->nfs_client); + nfs4_schedule_session_recovery(server->nfs_client->cl_session); goto out; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: /* Don't recall a delegation if it was lost */ - nfs4_schedule_state_recovery(server->nfs_client); + nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -ERESTARTSYS: /* @@ -1271,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state */ case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: - nfs4_state_mark_reclaim_nograce(server->nfs_client, state); + nfs4_schedule_stateid_recovery(server, state); case -EKEYEXPIRED: /* * User RPCSEC_GSS context has expired. @@ -1587,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) break; - nfs4_schedule_state_recovery(clp); + nfs4_schedule_state_manager(clp); ret = -EIO; } return ret; @@ -3178,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata) if (task->tk_status < 0) { /* Unless we're shutting down, schedule state recovery! 
*/ if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) - nfs4_schedule_state_recovery(clp); + nfs4_schedule_lease_recovery(clp); return; } do_renew_lease(clp, timestamp); @@ -3252,6 +3251,35 @@ static void buf_to_pages(const void *buf, size_t buflen, } } +static int buf_to_pages_noslab(const void *buf, size_t buflen, + struct page **pages, unsigned int *pgbase) +{ + struct page *newpage, **spages; + int rc = 0; + size_t len; + spages = pages; + + do { + len = min_t(size_t, PAGE_CACHE_SIZE, buflen); + newpage = alloc_page(GFP_KERNEL); + + if (newpage == NULL) + goto unwind; + memcpy(page_address(newpage), buf, len); + buf += len; + buflen -= len; + *pages++ = newpage; + rc++; + } while (buflen != 0); + + return rc; + +unwind: + for(; rc > 0; rc--) + __free_page(spages[rc-1]); + return -ENOMEM; +} + struct nfs4_cached_acl { int cached; size_t len; @@ -3420,13 +3448,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl .rpc_argp = &arg, .rpc_resp = &res, }; - int ret; + int ret, i; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; + i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); + if (i < 0) + return i; nfs_inode_return_delegation(inode); - buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); ret = nfs4_call_sync(server, &msg, &arg, &res, 1); + + /* + * Free each page after tx, so the only ref left is + * held by the network stack + */ + for (; i > 0; i--) + put_page(pages[i-1]); + /* * Acl update can result in inode attribute update. * so mark the attribute cache invalid. @@ -3464,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, case -NFS4ERR_OPENMODE: if (state == NULL) break; - nfs4_state_mark_reclaim_nograce(clp, state); - goto do_state_recovery; + nfs4_schedule_stateid_recovery(server, state); + goto wait_on_recovery; case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: - goto do_state_recovery; + nfs4_schedule_lease_recovery(clp); + goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -3480,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, case -NFS4ERR_SEQ_MISORDERED: dprintk("%s ERROR %d, Reset session\n", __func__, task->tk_status); - nfs4_schedule_state_recovery(clp); + nfs4_schedule_session_recovery(clp->cl_session); task->tk_status = 0; return -EAGAIN; #endif /* CONFIG_NFS_V4_1 */ @@ -3497,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, } task->tk_status = nfs4_map_errors(task->tk_status); return 0; -do_state_recovery: +wait_on_recovery: rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); - nfs4_schedule_state_recovery(clp); if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); task->tk_status = 0; @@ -4110,7 +4148,7 @@ static void nfs4_lock_release(void *calldata) task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, data->arg.lock_seqid); if (!IS_ERR(task)) - rpc_put_task(task); + rpc_put_task_async(task); dprintk("%s: cancelling lock!\n", __func__); } else nfs_free_seqid(data->arg.lock_seqid); @@ -4134,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) { - struct nfs_client *clp = server->nfs_client; - struct nfs4_state *state = lsp->ls_state; - switch (error) { case -NFS4ERR_ADMIN_REVOKED: case 
-NFS4ERR_BAD_STATEID: - case -NFS4ERR_EXPIRED: + lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; if (new_lock_owner != 0 || (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) - nfs4_state_mark_reclaim_nograce(clp, state); - lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + nfs4_schedule_stateid_recovery(server, lsp->ls_state); break; case -NFS4ERR_STALE_STATEID: - if (new_lock_owner != 0 || - (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) - nfs4_state_mark_reclaim_reboot(clp, state); lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + case -NFS4ERR_EXPIRED: + nfs4_schedule_lease_recovery(server->nfs_client); }; } @@ -4366,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) case -NFS4ERR_EXPIRED: case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: + nfs4_schedule_lease_recovery(server->nfs_client); + goto out; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: - nfs4_schedule_state_recovery(server->nfs_client); + nfs4_schedule_session_recovery(server->nfs_client->cl_session); goto out; case -ERESTARTSYS: /* @@ -4381,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OPENMODE: - nfs4_state_mark_reclaim_nograce(server->nfs_client, state); + nfs4_schedule_stateid_recovery(server, state); err = 0; goto out; case -EKEYEXPIRED: @@ -4988,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp) int status; unsigned *ptr; struct nfs4_session *session = clp->cl_session; + long timeout = 0; + int err; dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); - status = _nfs4_proc_create_session(clp); + do { + status = _nfs4_proc_create_session(clp); + if (status == -NFS4ERR_DELAY) { + err = nfs4_delay(clp->cl_rpcclient, &timeout); + if (err) + status = err; + } + } while (status == -NFS4ERR_DELAY); + if (status) goto out; @@ -5100,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client rpc_delay(task, NFS4_POLL_RETRY_MAX); return -EAGAIN; default: - nfs4_schedule_state_recovery(clp); + nfs4_schedule_lease_recovery(clp); } return 0; } @@ -5187,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr if (IS_ERR(task)) ret = PTR_ERR(task); else - rpc_put_task(task); + rpc_put_task_async(task); dprintk("<-- %s status=%d\n", __func__, ret); return ret; } @@ -5203,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) goto out; } ret = rpc_wait_for_completion_task(task); - if (!ret) + if (!ret) { + struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; + + if (task->tk_status == 0) + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); ret = task->tk_status; + } rpc_put_task(task); out: dprintk("<-- %s status=%d\n", __func__, ret); @@ -5241,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf rpc_delay(task, NFS4_POLL_RETRY_MAX); return -EAGAIN; default: - nfs4_schedule_state_recovery(clp); + nfs4_schedule_lease_recovery(clp); } return 0; } @@ -5309,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp) status = PTR_ERR(task); goto out; } + status = nfs4_wait_for_completion_rpc_task(task); + if (status == 0) + status = task->tk_status; rpc_put_task(task); return 0; out: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e6742b57a04c..0592288f9f06 100644 --- 
a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) } /* - * Schedule a state recovery attempt + * Schedule a lease recovery attempt */ -void nfs4_schedule_state_recovery(struct nfs_client *clp) +void nfs4_schedule_lease_recovery(struct nfs_client *clp) { if (!clp) return; @@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } -int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) +static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); @@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st return 1; } -int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) +static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); @@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s return 1; } +void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) +{ + struct nfs_client *clp = server->nfs_client; + + nfs4_state_mark_reclaim_nograce(clp, state); + nfs4_schedule_state_manager(clp); +} + static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; @@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) } #ifdef CONFIG_NFS_V4_1 +void nfs4_schedule_session_recovery(struct nfs4_session *session) +{ + nfs4_schedule_lease_recovery(session->clp); +} + void nfs41_handle_recall_slot(struct nfs_client *clp) { set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); - nfs4_schedule_state_recovery(clp); + nfs4_schedule_state_manager(clp); } static void nfs4_reset_all_state(struct nfs_client *clp) @@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp) if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { clp->cl_boot_time = CURRENT_TIME; nfs4_state_start_reclaim_nograce(clp); - nfs4_schedule_state_recovery(clp); + nfs4_schedule_state_manager(clp); } } @@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { nfs4_state_start_reclaim_reboot(clp); - nfs4_schedule_state_recovery(clp); + nfs4_schedule_state_manager(clp); } } @@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp) { nfs_expire_all_delegations(clp); if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) - nfs4_schedule_state_recovery(clp); + nfs4_schedule_state_manager(clp); } void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e2c168b6ee9..94d50e86a124 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr, p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); *p++ = cpu_to_be32(OP_CREATE_SESSION); - p = xdr_encode_hyper(p, clp->cl_ex_clid); + p = xdr_encode_hyper(p, clp->cl_clientid); *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ @@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr, p = xdr_inline_decode(xdr, 8); if 
(unlikely(!p)) goto out_overflow; - xdr_decode_hyper(p, &clp->cl_ex_clid); + xdr_decode_hyper(p, &clp->cl_clientid); p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 903908a20023..c541093a5bf2 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -86,11 +86,14 @@ /* Default path we try to mount. "%s" gets replaced by our IP address */ #define NFS_ROOT "/tftpboot/%s" +/* Default NFSROOT mount options. */ +#define NFS_DEF_OPTIONS "udp" + /* Parameters passed from the kernel command line */ static char nfs_root_parms[256] __initdata = ""; /* Text-based mount options passed to super.c */ -static char nfs_root_options[256] __initdata = ""; +static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS; /* Address of NFS server */ static __be32 servaddr __initdata = htonl(INADDR_NONE); @@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src, } static int __init root_nfs_cat(char *dest, const char *src, - const size_t destlen) + const size_t destlen) { + size_t len = strlen(dest); + + if (len && dest[len - 1] != ',') + if (strlcat(dest, ",", destlen) > destlen) + return -1; + if (strlcat(dest, src, destlen) > destlen) return -1; return 0; @@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, if (root_nfs_cat(nfs_root_options, incoming, sizeof(nfs_root_options))) return -1; - - /* - * Possibly prepare for more options to be appended - */ - if (nfs_root_options[0] != '\0' && - nfs_root_options[strlen(nfs_root_options)] != ',') - if (root_nfs_cat(nfs_root_options, ",", - sizeof(nfs_root_options))) - return -1; - return 0; } @@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, */ static int __init root_nfs_data(char *cmdline) { - char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; + char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; int len, retval = -1; char *tmp = NULL; const size_t tmplen = sizeof(nfs_export_path); @@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline) * Append mandatory options for nfsroot so they override * what has come before */ - snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", + snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4", &servaddr); - if (root_nfs_cat(nfs_root_options, addr_option, + if (root_nfs_cat(nfs_root_options, mand_options, sizeof(nfs_root_options))) goto out_optionstoolong; diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index e313a51acdd1..6481d537d69d 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n task_setup_data.rpc_client = NFS_CLIENT(dir); task = rpc_run_task(&task_setup_data); if (!IS_ERR(task)) - rpc_put_task(task); + rpc_put_task_async(task); return 1; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index c8278f4046cb..42b92d7a9cc4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); + if (how & FLUSH_SYNC) + rpc_wait_for_completion_task(task); rpc_put_task(task); return 0; } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index cde36cb0f348..02eb4edf0ece 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr, * If the server returns different values for 
sessionID, slotID or * sequence number, the server is looney tunes. */ - p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); + p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); if (unlikely(p == NULL)) goto out_overflow; memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 54b60bfceb8d..7b566ec14e18 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2445,15 +2445,16 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) static struct nfs4_delegation * find_delegation_file(struct nfs4_file *fp, stateid_t *stid) { - struct nfs4_delegation *dp = NULL; + struct nfs4_delegation *dp; spin_lock(&recall_lock); - list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { - if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) - break; - } + list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) + if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { + spin_unlock(&recall_lock); + return dp; + } spin_unlock(&recall_lock); - return dp; + return NULL; } int share_access_to_flags(u32 share_access) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc9..615f0a9f0600 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) + return status; iattr->ia_valid |= ATTR_UID; } if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { @@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) + return status; iattr->ia_valid |= ATTR_GID; } if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { @@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, u32 dummy; char *machine_name; - int i; + int i, j; int nr_secflavs; READ_BUF(16); @@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy * 4); - for (i = 0; i < dummy; ++i) + for (j = 0; j < dummy; ++j) READ32(dummy); break; case RPC_AUTH_GSS: diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f5286..85f7baa15f5d 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -35,11 +35,6 @@ #include "btnode.h" -void nilfs_btnode_cache_init_once(struct address_space *btnc) -{ - nilfs_mapping_init_once(btnc); -} - static const struct address_space_operations def_btnode_aops = { .sync_page = block_sync_page, }; diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 79037494f1e0..1b8ebd888c28 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { struct buffer_head *newbh; }; -void nilfs_btnode_cache_init_once(struct address_space *); void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f60..a0babd2bff6a 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -454,9 
+454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct backing_dev_info *bdi = inode->i_sb->s_bdi; INIT_LIST_HEAD(&shadow->frozen_buffers); - nilfs_mapping_init_once(&shadow->frozen_data); + address_space_init_once(&shadow->frozen_data); nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); - nilfs_mapping_init_once(&shadow->frozen_btnodes); + address_space_init_once(&shadow->frozen_btnodes); nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); mi->mi_shadow = shadow; return 0; diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 98034271cd02..161791d26458 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inc_nlink(old_inode); nilfs_set_link(new_dir, new_de, new_page, old_inode); nilfs_mark_inode_dirty(new_dir); new_inode->i_ctime = CURRENT_TIME; @@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= NILFS_LINK_MAX) goto out_dir; } - inc_nlink(old_inode); err = nilfs_add_link(new_dentry, old_inode); - if (err) { - drop_nlink(old_inode); - nilfs_mark_inode_dirty(old_inode); + if (err) goto out_dir; - } if (dir_de) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); @@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode->i_ctime = CURRENT_TIME; nilfs_delete_entry(old_de, old_page); - drop_nlink(old_inode); if (dir_de) { nilfs_set_link(old_inode, dir_de, dir_page, new_dir); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfef..a585b35fd6bc 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, return nc; } -void nilfs_mapping_init_once(struct address_space *mapping) -{ - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - INIT_LIST_HEAD(&mapping->private_list); - spin_lock_init(&mapping->private_lock); - - spin_lock_init(&mapping->i_mmap_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); -} - void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops) diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd891..2a00953ebd5f 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *); int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_clear_dirty_pages(struct address_space *); -void nilfs_mapping_init_once(struct address_space *mapping); void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 55ebae5c7f39..2de9f636792a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, nilfs_segctor_map_segsum_entry( sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); - if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) + if (NILFS_I(inode)->i_root && + !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); /* skip finfo */ } 
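The nilfs2 hunks above, together with the super.c hunk that follows, drop the filesystem-private nilfs_mapping_init_once()/nilfs_btnode_cache_init_once() helpers in favour of a common address_space_init_once() (its declaration is added to include/linux/fs.h further down in this series). As a rough, non-authoritative sketch, assuming the shared helper simply absorbs the body removed from fs/nilfs2/page.c above, it would look something like this:

/* Sketch only: modeled on the nilfs_mapping_init_once() body removed
 * above; the real fs/inode.c implementation may differ in detail. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/prio_tree.h>
#include <linux/radix-tree.h>
#include <linux/string.h>

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	spin_lock_init(&mapping->i_mmap_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}

Callers such as nilfs_mdt_setup_shadow_map() (mdt.c hunk above) and nilfs_inode_init_once() (super.c hunk below) then use this single initializer for every address_space they embed, instead of the per-filesystem copies being removed here.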
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 58fd707174e1..1673b3d99842 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj) #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif - nilfs_btnode_cache_init_once(&ii->i_btnode_cache); + address_space_init_once(&ii->i_btnode_cache); ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 6d80ecc7834f..7eb90403fc8a 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, int ret = 0; /* if all else fails, just return false */ struct ocfs2_super *osb; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 43e56b97f9c0..6180da1e37e6 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb) ocfs2_quota_trans_credits(sb); } -/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + - * bitmap block for the new bit) dx_root update for free list */ -#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) +/* data block for new dir/symlink, allocation of directory block, dx_root + * update for free list */ +#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1) static inline int ocfs2_add_dir_index_credits(struct super_block *sb) { diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index b5f9160e93e9..19ebc5aad391 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, u32 num_clusters, unsigned int e_flags) { int ret, delete, index, credits = 0; - u32 new_bit, new_len; + u32 new_bit, new_len, orig_num_clusters; unsigned int set_len; struct ocfs2_super *osb = OCFS2_SB(sb); handle_t *handle; @@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, goto out; } + orig_num_clusters = num_clusters; + while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, p_cluster, num_clusters, @@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, * in write-back mode. */ if (context->get_clusters == ocfs2_di_get_clusters) { - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + ret = ocfs2_cow_sync_writeback(sb, context, cpos, + orig_num_clusters); if (ret) mlog_errno(ret); } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447e..36c423fb0635 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb, struct mount_options *mopt, int is_remount) { - int status; + int status, user_stack = 0; char *p; u32 tmp; @@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb, memcpy(mopt->cluster_stack, args[0].from, OCFS2_STACK_LABEL_LEN); mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have + * an osb to pass to + * ocfs2_userspace_stack(). 
+ */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + user_stack = 1; break; case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; @@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb, } } - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; + if (user_stack == 0) { + /* Ensure only one heartbeat mode */ + tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + status = 0; + goto bail; + } } status = 1; diff --git a/fs/open.c b/fs/open.c index 5a2c6ebc22b5..b47aab39c057 100644 --- a/fs/open.c +++ b/fs/open.c @@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!(file->f_mode & FMODE_WRITE)) return -EBADF; + + /* It's not possible punch hole on append only file */ + if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode)) + return -EPERM; + + if (IS_IMMUTABLE(inode)) + return -EPERM; + /* * Revalidate the write permissions, in case security policy has * changed since the files were opened. diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 789c625c7aa5..b10e3540d5b7 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) } vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + vm->vblk_offset = get_unaligned_be32(data + 0x0C); vm->last_vblk_seq = get_unaligned_be32(data + 0x04); diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee79..11f688bd76c5 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) int mac_partition(struct parsed_partitions *state) { - int slot = 1; Sector sect; unsigned char *data; - int blk, blocks_in_map; + int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; @@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; /* not a MacOS disk */ } - strlcat(state->pp_buf, " [mac]", PAGE_SIZE); blocks_in_map = be32_to_cpu(part->map_count); - for (blk = 1; blk <= blocks_in_map; ++blk) { - int pos = blk * secsize; + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + strlcat(state->pp_buf, " [mac]", PAGE_SIZE); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, &sect); if (!data) @@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) } if (goodness > found_root_goodness) { - found_root = blk; + found_root = slot; found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ - - ++slot; } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c index 48cec7cbca17..be03a0b08b47 100644 --- a/fs/partitions/osf.c +++ b/fs/partitions/osf.c @@ -10,10 +10,13 @@ #include "check.h" #include "osf.h" +#define MAX_OSF_PARTITIONS 8 + int osf_partition(struct parsed_partitions *state) { int i; int slot = 1; + unsigned int npartitions; Sector sect; unsigned char *data; struct disklabel { @@ -45,7 +48,7 @@ int 
osf_partition(struct parsed_partitions *state) u8 p_fstype; u8 p_frag; __le16 p_cpg; - } d_partitions[8]; + } d_partitions[MAX_OSF_PARTITIONS]; } * label; struct d_partition * partition; @@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; } - for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { + npartitions = le16_to_cpu(label->d_npartitions); + if (npartitions > MAX_OSF_PARTITIONS) { + put_dev_sector(sect); + return 0; + } + for (i = 0 ; i < npartitions; i++, partition++) { if (slot == state->limit) break; if (le32_to_cpu(partition->p_size)) diff --git a/fs/proc/base.c b/fs/proc/base.c index 9d096e82b201..d49c4b5d2c3e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = { &proc_self_inode_operations, NULL, {}), }; -/* - * Exceptional case: normally we are not allowed to unhash a busy - * directory. In this case, however, we can do it - no aliasing problems - * due to the way we treat inodes. - */ -static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) -{ - struct inode *inode; - struct task_struct *task; - - if (nd->flags & LOOKUP_RCU) - return -ECHILD; - - inode = dentry->d_inode; - task = get_proc_task(inode); - if (task) { - put_task_struct(task); - return 1; - } - d_drop(dentry); - return 0; -} - -static const struct dentry_operations proc_base_dentry_operations = -{ - .d_revalidate = proc_base_revalidate, - .d_delete = pid_delete_dentry, -}; - static struct dentry *proc_base_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { @@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir, if (p->fop) inode->i_fop = p->fop; ei->op = p->op; - d_set_d_op(dentry, &proc_base_dentry_operations); d_add(dentry, inode); error = NULL; out: diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 176ce4cda68a..d6a7ca1fdac5 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -27,6 +27,7 @@ static void proc_evict_inode(struct inode *inode) { struct proc_dir_entry *de; + struct ctl_table_header *head; truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); @@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode) de = PROC_I(inode)->pde; if (de) pde_put(de); - if (PROC_I(inode)->sysctl) - sysctl_head_put(PROC_I(inode)->sysctl); + head = PROC_I(inode)->sysctl; + if (head) { + rcu_assign_pointer(PROC_I(inode)->sysctl, NULL); + sysctl_head_put(head); + } } struct vfsmount *proc_mnt; diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index d9396a4fc7ff..927cbd115e53 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -233,7 +233,7 @@ void __init proc_device_tree_init(void) return; root = of_find_node_by_path("/"); if (root == NULL) { - printk(KERN_ERR "/proc/device-tree: can't find root\n"); + pr_debug("/proc/device-tree: can't find root\n"); return; } proc_device_tree_add_node(root, proc_device_tree); diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 09a1f92a34ef..8eb2522111c5 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { + struct ctl_table_header *head; /* Although proc doesn't have negative dentries, rcu-walk means * that inode here can be NULL */ + /* AV: can it, indeed? 
*/ if (!inode) - return 0; + return 1; if (name->len != len) return 1; if (memcmp(name->name, str, len)) return 1; - return !sysctl_is_seen(PROC_I(inode)->sysctl); + head = rcu_dereference(PROC_I(inode)->sysctl); + return !head || !sysctl_is_seen(head); } static const struct dentry_operations proc_sys_dentry_operations = { diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index ba5f51ec3458..68fdf45cc6c9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, dentry, inode, &security); if (retval) { - dir->i_nlink--; + DEC_DIR_INODE_NLINK(dir) goto out_failed; } diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 3cfb2e933644..5c11ca82b782 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (nd->flags & LOOKUP_RCU) - return -ECHILD; return -EPERM; } diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b427b1208c26..e474fbcf8bde 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, new_de = sysv_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); sysv_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); err = sysv_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } sysv_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { sysv_set_link(dir_de, dir_page, new_dir); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 2be0f9eb86d2..b7c338d5e9df 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -32,6 +32,8 @@ #include <linux/crc-itu-t.h> #include <linux/exportfs.h> +enum { UDF_MAX_LINKS = 0xffff }; + static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) { @@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct udf_inode_info *iinfo; err = -EMLINK; - if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) + if (dir->i_nlink >= UDF_MAX_LINKS) goto out; err = -EIO; @@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, struct fileIdentDesc cfi, *fi; int err; - if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { + if (inode->i_nlink >= UDF_MAX_LINKS) return -EMLINK; - } fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { @@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; retval = -EMLINK; - if (!new_inode && - new_dir->i_nlink >= - (256 << sizeof(new_dir->i_nlink)) - 1) + if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) goto end_rename; } if (!nfi) { diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 12f39b9e4437..d6f681535eb8 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = ufs_find_entry(new_dir, &new_dentry->d_name, 
&new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ufs_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= UFS_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ufs_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; ufs_delete_entry(old_dir, old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { ufs_set_link(old_inode, dir_de, dir_page, new_dir); diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index 05201ae719e5..d61611c88012 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c @@ -152,6 +152,8 @@ xfs_ioc_trim( if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); + if (!blk_queue_discard(q)) + return -XFS_ERROR(EOPNOTSUPP); if (copy_from_user(&range, urange, sizeof(range))) return -XFS_ERROR(EFAULT); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index f5e2a19e0f8e..0ca0e3c024d7 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1( xfs_mount_t *mp, void __user *arg) { - xfs_fsop_geom_v1_t fsgeo; + xfs_fsop_geom_t fsgeo; int error; - error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); + error = xfs_fs_geometry(mp, &fsgeo, 3); if (error) return -error; - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) + /* + * Caller should have passed an argument of type + * xfs_fsop_geom_v1_t. This is a proper subset of the + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
+ */ + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) return -XFS_ERROR(EFAULT); return 0; } diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cec89dd5d7d2..85668efb3e3e 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -53,6 +53,9 @@ xfs_fs_geometry( xfs_fsop_geom_t *geo, int new_version) { + + memset(geo, 0, sizeof(*geo)); + geo->blocksize = mp->m_sb.sb_blocksize; geo->rtextsize = mp->m_sb.sb_rextsize; geo->agblocks = mp->m_sb.sb_agblocks; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 31b6188df221..b4bfe338ea0e 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -4,6 +4,8 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU +#include <linux/mm_types.h> + #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index fe29aadb129d..348843b80150 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1101,7 +1101,7 @@ struct drm_device { struct platform_device *platformdev; /**< Platform device struture */ struct drm_sg_mem *sg; /**< Scatter gather memory */ - int num_crtcs; /**< Number of CRTCs on this device */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ void *dev_private; /**< device private data */ void *mm_private; struct address_space *dev_mapping; diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 5cb86c307f5d..fc4875433817 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -99,7 +99,6 @@ struct rxrpc_key_token { * structure of raw payloads passed to add_key() or instantiate key */ struct rxrpc_key_data_v1 { - u32 kif_version; /* 1 */ u16 security_index; u16 ticket_length; u32 expiry; /* time_t */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d18ff34670a..d5063e1b5555 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); -extern void __blk_run_queue(struct request_queue *); +extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); extern void blk_run_queue(struct request_queue *); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, @@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* @@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) extern int blk_throtl_init(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q); extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); -extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); extern void throtl_shutdown_timer_wq(struct request_queue *q); #else /* CONFIG_BLK_DEV_THROTTLING */ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) @@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline int 
blk_throtl_exit(struct request_queue *q) { return 0; } -static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} #endif /* CONFIG_BLK_DEV_THROTTLING */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 3395cf7130f5..b22fb0d3db0f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq) extern void blk_dump_cmd(char *buf, struct request *rq); extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); -extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c3011beac30d..31d91a64838b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -123,6 +123,7 @@ struct ceph_msg_pos { #define SOCK_CLOSED 11 /* socket state changed to closed */ #define OPENING 13 /* open connection w/ (possibly new) peer */ #define DEAD 14 /* dead, about to kfree */ +#define BACKOFF 15 /* * A single connection with another host. @@ -160,7 +161,6 @@ struct ceph_connection { struct list_head out_queue; struct list_head out_sent; /* sending or sent but unacked */ u64 out_seq; /* last message queued for send */ - bool out_keepalive_pending; u64 in_seq, in_seq_acked; /* last message received, acked */ diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index 68cd248f6d3e..66900e3c6eb1 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h @@ -101,8 +101,8 @@ struct ieee_pfc { */ struct dcb_app { __u8 selector; - __u32 protocol; __u8 priority; + __u16 protocol; }; struct dcbmsg { diff --git a/include/linux/freezer.h b/include/linux/freezer.h index da7e52b099f3..1effc8b56b4e 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -109,7 +109,7 @@ static inline void freezer_count(void) } /* - * Check if the task should be counted as freezeable by the freezer + * Check if the task should be counted as freezable by the freezer */ static inline int freezer_should_skip(struct task_struct *p) { diff --git a/include/linux/fs.h b/include/linux/fs.h index bd3215940c37..e38b50a4b9d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -649,6 +649,7 @@ struct address_space { spinlock_t private_lock; /* for use by the address_space */ struct list_head private_list; /* ditto */ struct address_space *assoc_mapping; /* ditto */ + struct mutex unmap_mutex; /* to protect unmapping */ } __attribute__((aligned(sizeof(long)))); /* * On most architectures that alignment is already the case; but @@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); -extern int __invalidate_device(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); extern int invalidate_partition(struct gendisk *, int); #endif unsigned long invalidate_mapping_pages(struct address_space *mapping, @@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); extern int inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void address_space_init_once(struct address_space *mapping); extern void ihold(struct inode * inode); extern void iput(struct inode *); extern struct inode * 
igrab(struct inode *); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0b84c61607e8..dca31761b311 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) return alloc_pages_current(gfp_mask, order); } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, - struct vm_area_struct *vma, unsigned long addr); + struct vm_area_struct *vma, unsigned long addr, + int node); #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_pages_vma(gfp_mask, order, vma, addr) \ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) -#define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr) +#define alloc_page_vma(gfp_mask, vma, addr) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) +#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, node) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 55e0d4253e49..d746da19c6a2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -55,7 +55,7 @@ * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend - * + * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set */ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 @@ -67,6 +67,7 @@ #define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 #define IRQF_NO_SUSPEND 0x00004000 +#define IRQF_FORCE_RESUME 0x00008000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) diff --git a/include/linux/list.h b/include/linux/list.h index 9a5f8a71810c..3a54266a1e85 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) * in an undefined state. 
*/ #ifndef CONFIG_DEBUG_LIST +static inline void __list_del_entry(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); @@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry) entry->prev = LIST_POISON2; } #else +extern void __list_del_entry(struct list_head *entry); extern void list_del(struct list_head *entry); #endif @@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old, */ static inline void list_del_init(struct list_head *entry) { - __list_del(entry->prev, entry->next); + __list_del_entry(entry); INIT_LIST_HEAD(entry); } @@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry) */ static inline void list_move(struct list_head *list, struct list_head *head) { - __list_del(list->prev, list->next); + __list_del_entry(list); list_add(list, head); } @@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head) static inline void list_move_tail(struct list_head *list, struct list_head *head) { - __list_del(list->prev, list->next); + __list_del_entry(list); list_add_tail(list, head); } diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index 3fd36845ca45..ef4f0b6083a3 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h @@ -71,6 +71,7 @@ struct wm8994 { u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; /* Used over suspend/resume */ + bool suspended; u16 ldo_regs[WM8994_NUM_LDO_REGS]; u16 gpio_regs[WM8994_NUM_GPIO_REGS]; diff --git a/include/linux/module.h b/include/linux/module.h index 9bdf27c7615b..5de42043dff0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -62,7 +62,7 @@ struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; -}; +} __attribute__ ((__aligned__(sizeof(void *)))); struct module_kobject { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d971346b0340..71caf7a5e6c6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2392,6 +2392,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...) extern int netdev_info(const struct net_device *dev, const char *format, ...) __attribute__ ((format (printf, 2, 3))); +#define MODULE_ALIAS_NETDEV(device) \ + MODULE_ALIAS("netdev-" device) + #if defined(DEBUG) #define netdev_dbg(__dev, format, args...) 
\ netdev_printk(KERN_DEBUG, __dev, format, ##args) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b197563913bf..3e112de12d8d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -68,11 +68,7 @@ struct nfs_client { unsigned char cl_id_uniquifier; u32 cl_cb_ident; /* v4.0 callback identifier */ const struct nfs4_minor_version_ops *cl_mvops; -#endif /* CONFIG_NFS_V4 */ -#ifdef CONFIG_NFS_V4_1 - /* clientid returned from EXCHANGE_ID, used by session operations */ - u64 cl_ex_clid; /* The sequence id to use for the next CREATE_SESSION */ u32 cl_seqid; /* The flags used for obtaining the clientid during EXCHANGE_ID */ @@ -80,7 +76,7 @@ struct nfs_client { struct nfs4_session *cl_session; /* sharred session */ struct list_head cl_layouts; struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ -#endif /* CONFIG_NFS_V4_1 */ +#endif /* CONFIG_NFS_V4 */ #ifdef CONFIG_NFS_FSCACHE struct fscache_cookie *fscache; /* client index cache cookie */ @@ -185,7 +181,7 @@ struct nfs_server { /* maximum number of slots to use */ #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE -#if defined(CONFIG_NFS_V4_1) +#if defined(CONFIG_NFS_V4) /* Sessions */ #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) @@ -225,5 +221,5 @@ struct nfs4_session { struct nfs_client *clp; }; -#endif /* CONFIG_NFS_V4_1 */ +#endif /* CONFIG_NFS_V4 */ #endif diff --git a/include/linux/pm.h b/include/linux/pm.h index dd9c7ab38270..21415cc91cbb 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -431,6 +431,8 @@ struct dev_pm_info { struct list_head entry; struct completion completion; struct wakeup_source *wakeup; +#else + unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM_RUNTIME struct timer_list suspend_timer; diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 9cff00dd6b63..03a67db03d01 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } -static inline bool device_may_wakeup(struct device *dev) -{ - return false; -} - static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; @@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {} static inline int device_wakeup_enable(struct device *dev) { - return -EINVAL; + dev->power.should_wakeup = true; + return 0; } static inline int device_wakeup_disable(struct device *dev) { + dev->power.should_wakeup = false; return 0; } -static inline int device_init_wakeup(struct device *dev, bool val) +static inline int device_set_wakeup_enable(struct device *dev, bool enable) { - dev->power.can_wakeup = val; - return val ? 
-EINVAL : 0; + dev->power.should_wakeup = enable; + return 0; } +static inline int device_init_wakeup(struct device *dev, bool val) +{ + device_set_wakeup_capable(dev, val); + device_set_wakeup_enable(dev, val); + return 0; +} -static inline int device_set_wakeup_enable(struct device *dev, bool enable) +static inline bool device_may_wakeup(struct device *dev) { - return -EINVAL; + return dev->power.can_wakeup && dev->power.should_wakeup; } static inline void __pm_stay_awake(struct wakeup_source *ws) {} diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 092a04f874a8..a1147e5dd245 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -102,11 +102,8 @@ extern long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data); -extern int ptrace_traceme(void); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); -extern int ptrace_attach(struct task_struct *tsk); -extern int ptrace_detach(struct task_struct *, unsigned int); extern void ptrace_disable(struct task_struct *); extern int ptrace_check_attach(struct task_struct *task, int kill); extern int ptrace_request(struct task_struct *child, long request, diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index d63dcbaea169..9026b30238f3 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -14,10 +14,12 @@ #define LINUX_RIO_REGS_H /* - * In RapidIO, each device has a 2MB configuration space that is + * In RapidIO, each device has a 16MB configuration space that is * accessed via maintenance transactions. Portions of configuration * space are standardized and/or reserved. 
*/ +#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */ + #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index a0b639f8e805..89c3e5182991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -203,6 +203,18 @@ struct rtc_device struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; + + +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + struct work_struct uie_task; + struct timer_list uie_timer; + /* Those fields are protected by rtc->irq_lock */ + unsigned int oldsecs; + unsigned int uie_irq_active:1; + unsigned int stop_uie_polling:1; + unsigned int uie_task_active:1; + unsigned int uie_timer_active:1; +#endif }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) @@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34e..777d8a5ed06b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ /* diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 88513fd8e208..d81db8012c63 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, const struct rpc_call_ops *ops); void rpc_put_task(struct rpc_task *); +void rpc_put_task_async(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_exit(struct rpc_task *, int); void rpc_release_calldata(const struct rpc_call_ops *, void *); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 7bb5cb64f3b8..11684d9e6bd2 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -930,6 +930,7 @@ enum #ifdef __KERNEL__ #include <linux/list.h> +#include <linux/rcupdate.h> /* For the /proc/sys support */ struct ctl_table; @@ -1037,10 +1038,15 @@ struct ctl_table_root { struct ctl_table trees. 
*/ struct ctl_table_header { - struct ctl_table *ctl_table; - struct list_head ctl_entry; - int used; - int count; + union { + struct { + struct ctl_table *ctl_table; + struct list_head ctl_entry; + int used; + int count; + }; + struct rcu_head rcu; + }; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8651556dbd52..d3ec89fb4122 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, const struct thermal_cooling_device_ops *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); + +#ifdef CONFIG_NET extern int generate_netlink_event(u32 orig, enum events event); +#else +static inline int generate_netlink_event(u32 orig, enum events event) +{ + return 0; +} +#endif #endif /* __THERMAL_H__ */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1ac11586a2f5..f7998a3bf020 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } enum { WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ - WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ + WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ @@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, /** * alloc_ordered_workqueue - allocate an ordered workqueue * @name: name of the workqueue - * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. 
They are @@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) #define create_workqueue(name) \ alloc_workqueue((name), WQ_MEM_RECLAIM, 1) -#define create_freezeable_workqueue(name) \ - alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) +#define create_freezable_workqueue(name) \ + alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) #define create_singlethread_workqueue(name) \ alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4a3cd2cd2f5e..96e50e0ce3ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -89,6 +89,18 @@ #define IPV6_ADDR_SCOPE_GLOBAL 0x0e /* + * Addr flags + */ +#ifdef __KERNEL__ +#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ + ((a)->s6_addr[1] & 0x10) +#define IPV6_ADDR_MC_FLAG_PREFIX(a) \ + ((a)->s6_addr[1] & 0x20) +#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ + ((a)->s6_addr[1] & 0x40) +#endif + +/* * fragmentation header */ diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index cd85b3bc8327..e505358d8999 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h @@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, } #endif -static inline void -nf_tproxy_put_sock(struct sock *sk) -{ - /* TIME_WAIT inet sockets have to be handled differently */ - if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT)) - inet_twsk_put(inet_twsk(sk)); - else - sock_put(sk); -} - /* assign a socket to the skb -- consumes sk */ -int +void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 160a407c1963..04f8556313d5 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -199,7 +199,7 @@ struct tcf_proto { struct qdisc_skb_cb { unsigned int pkt_len; - char data[]; + long data[]; }; static inline int qdisc_qlen(struct Qdisc *q) diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 8479b66c067b..3fd5064dd43a 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h @@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev); #define CONF_ENABLE_ESR 0x0008 #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ * (CONF_ENABLE_IRQ) in use */ +#define CONF_ENABLE_ZVCARD 0x0020 /* flags used by pcmcia_loop_config() autoconfiguration */ #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? 
*/ diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h index b4a0db2307ef..1eeebd534f7e 100644 --- a/include/sound/wm8903.h +++ b/include/sound/wm8903.h @@ -17,13 +17,9 @@ /* * R6 (0x06) - Mic Bias Control 0 */ -#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */ -#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */ -#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */ +#define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */ +#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */ +#define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */ #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07fdfb6b9a9a..0828b6c8610a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -8,7 +8,6 @@ #include <scsi/scsi_cmnd.h> #include <net/sock.h> #include <net/tcp.h> -#include "target_core_mib.h" #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) @@ -195,6 +194,21 @@ typedef enum { SAM_TASK_ATTR_EMULATED } t10_task_attr_index_t; +/* + * Used for target SCSI statistics + */ +typedef enum { + SCSI_INST_INDEX, + SCSI_DEVICE_INDEX, + SCSI_AUTH_INTR_INDEX, + SCSI_INDEX_TYPE_MAX +} scsi_index_t; + +struct scsi_index_table { + spinlock_t lock; + u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; +} ____cacheline_aligned; + struct se_cmd; struct t10_alua { @@ -578,8 +592,6 @@ struct se_node_acl { spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; - /* Used for MIB access */ - atomic_t mib_ref_count; struct se_dev_entry *device_list; struct se_session *nacl_sess; struct se_portal_group *se_tpg; @@ -595,8 +607,6 @@ struct se_node_acl { } ____cacheline_aligned; struct se_session { - /* Used for MIB access */ - atomic_t mib_ref_count; u64 sess_bin_isid; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; @@ -806,7 +816,6 @@ struct se_hba { /* Virtual iSCSI devices attached. */ u32 dev_count; u32 hba_index; - atomic_t dev_mib_access_count; atomic_t load_balance_queue; atomic_t left_queue_depth; /* Maximum queue depth the HBA can handle. 
*/ @@ -845,6 +854,12 @@ struct se_lun { #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) +struct scsi_port_stats { + u64 cmd_pdus; + u64 tx_data_octets; + u64 rx_data_octets; +} ____cacheline_aligned; + struct se_port { /* RELATIVE TARGET PORT IDENTIFER */ u16 sep_rtpi; @@ -867,6 +882,7 @@ struct se_port { } ____cacheline_aligned; struct se_tpg_np { + struct se_portal_group *tpg_np_parent; struct config_group tpg_np_group; } ____cacheline_aligned; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 66f44e56eb80..2e8ec51f0615 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -111,6 +111,8 @@ struct se_subsystem_api; extern int init_se_global(void); extern void release_se_global(void); +extern void init_scsi_index_table(void); +extern u32 scsi_get_new_index(scsi_index_t); extern void transport_init_queue_obj(struct se_queue_obj *); extern int transport_subsystem_check_init(void); extern int transport_subsystem_register(struct se_subsystem_api *); @@ -133,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int); extern void transport_add_task_to_execute_queue(struct se_task *, struct se_task *, struct se_device *); +extern void transport_remove_task_from_execute_queue(struct se_task *, + struct se_device *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); extern void transport_dump_dev_state(struct se_device *, char *, int *); extern void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/include/trace/events/block.h b/include/trace/events/block.h index aba421d68f6f..78f18adb49c8 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, 0 : blk_rq_sectors(rq); __entry->errors = rq->errors; - blk_fill_rwbs_rq(__entry->rwbs, rq); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); blk_dump_cmd(__get_str(cmd), rq); ), @@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq, __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
blk_rq_bytes(rq) : 0; - blk_fill_rwbs_rq(__entry->rwbs, rq); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); blk_dump_cmd(__get_str(cmd), rq); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap, __entry->nr_sector = blk_rq_sectors(rq); __entry->old_dev = dev; __entry->old_sector = from; - blk_fill_rwbs_rq(__entry->rwbs, rq); + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4349935c2ad8..e92e98189032 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, return -ENODEV; trialcs = alloc_trial_cpuset(cs); - if (!trialcs) - return -ENOMEM; + if (!trialcs) { + retval = -ENOMEM; + goto out; + } switch (cft->private) { case FILE_CPULIST: @@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, } free_trial_cpuset(trialcs); +out: cgroup_unlock(); return retval; } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4571ae7e085a..99c3bc8a6fb4 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -3,6 +3,12 @@ */ #include <linux/irqdesc.h> +#ifdef CONFIG_SPARSE_IRQ +# define IRQ_BITMAP_BITS (NR_IRQS + 8196) +#else +# define IRQ_BITMAP_BITS NR_IRQS +#endif + extern int noirqdebug; #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 282f20230e67..2039bea31bdf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); static DEFINE_MUTEX(sparse_irq_lock); -static DECLARE_BITMAP(allocated_irqs, NR_IRQS); +static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); #ifdef CONFIG_SPARSE_IRQ @@ -217,6 +217,15 @@ int __init early_irq_init(void) initcnt = arch_probe_nr_irqs(); printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); + if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) + nr_irqs = IRQ_BITMAP_BITS; + + if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) + initcnt = IRQ_BITMAP_BITS; + + if (initcnt > nr_irqs) + nr_irqs = initcnt; + for (i = 0; i < initcnt; i++) { desc = alloc_desc(i, node); set_bit(i, allocated_irqs); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747dd..2782bacdf494 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -282,8 +282,17 @@ EXPORT_SYMBOL(disable_irq); void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) { - if (resume) + if (resume) { + if (!(desc->status & IRQ_SUSPENDED)) { + if (!desc->action) + return; + if (!(desc->action->flags & IRQF_FORCE_RESUME)) + return; + /* Pretend that it got disabled ! 
*/ + desc->depth++; + } desc->status &= ~IRQ_SUSPENDED; + } switch (desc->depth) { case 0: @@ -1100,7 +1109,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (retval) kfree(action); -#ifdef CONFIG_DEBUG_SHIRQ +#ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 0d4005d85b03..d6bfb89cce91 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -53,9 +53,6 @@ void resume_device_irqs(void) for_each_irq_desc(irq, desc) { unsigned long flags; - if (!(desc->status & IRQ_SUSPENDED)) - continue; - raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, true); raw_spin_unlock_irqrestore(&desc->lock, flags); diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 891115a929aa..dc49358b73fa 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -23,7 +23,7 @@ #ifdef CONFIG_HARDIRQS_SW_RESEND /* Bitmap to handle software resend of interrupts: */ -static DECLARE_BITMAP(irqs_resend, NR_IRQS); +static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); /* * Run software resends of IRQ's diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 999835b6112b..656222fcf767 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -782,6 +782,10 @@ retry: raw_spin_unlock_irq(&ctx->lock); } +#define MAX_INTERRUPTS (~0ULL) + +static void perf_log_throttle(struct perf_event *event, int enable); + static int event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, @@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event, event->state = PERF_EVENT_STATE_ACTIVE; event->oncpu = smp_processor_id(); + + /* + * Unthrottle events, since we scheduled we might have missed several + * ticks already, also for a heavily scheduling task there is little + * guarantee it'll get a tick in a timely manner. + */ + if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { + perf_log_throttle(event, 1); + event->hw.interrupts = 0; + } + /* * The new state must be visible before we turn it on in the hardware: */ @@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task) } } -#define MAX_INTERRUPTS (~0ULL) - -static void perf_log_throttle(struct perf_event *event, int enable); - static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) { u64 frequency = event->attr.sample_freq; diff --git a/kernel/power/main.c b/kernel/power/main.c index 7b5db6a8561e..701853042c28 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq); static int __init pm_start_workqueue(void) { - pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); + pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); return pm_wq ? 
0 : -ENOMEM; } diff --git a/kernel/power/process.c b/kernel/power/process.c index d6d2a10320e0..0cf3a27a6c9d 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -22,7 +22,7 @@ */ #define TIMEOUT (20 * HZ) -static inline int freezeable(struct task_struct * p) +static inline int freezable(struct task_struct * p) { if ((p == current) || (p->flags & PF_NOFREEZE) || @@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only) todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { - if (frozen(p) || !freezeable(p)) + if (frozen(p) || !freezable(p)) continue; if (!freeze_task(p, sig_only)) @@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only) read_lock(&tasklist_lock); do_each_thread(g, p) { - if (!freezeable(p)) + if (!freezable(p)) continue; if (nosig_only && should_send_signal(p)) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0dac75ea4456..64db648ff911 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1519,11 +1519,8 @@ static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages, unsigned int nr_highmem) { - int error = 0; - if (nr_highmem > 0) { - error = get_highmem_buffer(PG_ANY); - if (error) + if (get_highmem_buffer(PG_ANY)) goto err_out; if (nr_highmem > alloc_highmem) { nr_highmem -= alloc_highmem; @@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, err_out: swsusp_free(); - return error; + return -ENOMEM; } asmlinkage int swsusp_save(void) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1708b1e2972d..e2302e40b360 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) return !err; } -int ptrace_attach(struct task_struct *task) +static int ptrace_attach(struct task_struct *task) { int retval; @@ -219,7 +219,7 @@ out: * Performs checks and sets PT_PTRACED. * Should be used by all ptrace implementations for PTRACE_TRACEME. */ -int ptrace_traceme(void) +static int ptrace_traceme(void) { int ret = -EPERM; @@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) return false; } -int ptrace_detach(struct task_struct *child, unsigned int data) +static int ptrace_detach(struct task_struct *child, unsigned int data) { bool dead = false; diff --git a/kernel/sched.c b/kernel/sched.c index 18d38e4ec7ba..42eab5a8437d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) { __wake_up_common(q, mode, 1, 0, key); } +EXPORT_SYMBOL_GPL(__wake_up_locked_key); /** * __wake_up_sync_key - wake up threads blocked on a waitqueue. 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index ad6267714c84..01f75a5f17af 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se); static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { - int this_cpu = smp_processor_id(); struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; struct sched_rt_entity *rt_se; - rt_se = rt_rq->tg->rt_se[this_cpu]; + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); + + rt_se = rt_rq->tg->rt_se[cpu]; if (rt_rq->rt_nr_running) { if (rt_se && !on_rt_rq(rt_se)) @@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { - int this_cpu = smp_processor_id(); struct sched_rt_entity *rt_se; + int cpu = cpu_of(rq_of_rt_rq(rt_rq)); - rt_se = rt_rq->tg->rt_se[this_cpu]; + rt_se = rt_rq->tg->rt_se[cpu]; if (rt_se && on_rt_rq(rt_se)) dequeue_rt_entity(rt_se); @@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); - } else if (rt_rq->rt_nr_running) + } else if (rt_rq->rt_nr_running) { idle = 0; + if (!rt_rq_throttled(rt_rq)) + enqueue = 1; + } if (enqueue) sched_rt_rq_enqueue(rt_rq); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0f1bd83db985..4eed0af5d144 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -194,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, static struct ctl_table root_table[]; static struct ctl_table_root sysctl_table_root; static struct ctl_table_header root_table_header = { - .count = 1, + {{.count = 1, .ctl_table = root_table, - .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), + .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }; @@ -1567,11 +1567,16 @@ void sysctl_head_get(struct ctl_table_header *head) spin_unlock(&sysctl_lock); } +static void free_head(struct rcu_head *rcu) +{ + kfree(container_of(rcu, struct ctl_table_header, rcu)); +} + void sysctl_head_put(struct ctl_table_header *head) { spin_lock(&sysctl_lock); if (!--head->count) - kfree(head); + call_rcu(&head->rcu, free_head); spin_unlock(&sysctl_lock); } @@ -1948,10 +1953,10 @@ void unregister_sysctl_table(struct ctl_table_header * header) start_unregistering(header); if (!--header->parent->count) { WARN_ON(1); - kfree(header->parent); + call_rcu(&header->parent->rcu, free_head); } if (!--header->count) - kfree(header); + call_rcu(&header->rcu, free_head); spin_unlock(&sysctl_lock); } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 48b2761b5668..a3b5aff62606 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; } +/* + * Check whether the broadcast device supports oneshot. + */ +bool tick_broadcast_oneshot_available(void) +{ + struct clock_event_device *bc = tick_broadcast_device.evtdev; + + return bc ? 
bc->features & CLOCK_EVT_FEAT_ONESHOT : false; +} + #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 051bc80a0c43..ed228ef6f6b8 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); - return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); + if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) + return 0; + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) + return 1; + return tick_broadcast_oneshot_available(); } /* diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefbc1f60..f65d3a723a64 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast(int cpu); +bool tick_broadcast_oneshot_available(void); # else /* BROADCAST */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { @@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } static inline void tick_check_oneshot_broadcast(int cpu) { } +static inline bool tick_broadcast_oneshot_available(void) { return true; } # endif /* !BROADCAST */ #else /* !ONESHOT */ @@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) return 0; } static inline int tick_broadcast_oneshot_active(void) { return 0; } +static inline bool tick_broadcast_oneshot_available(void) { return false; } #endif /* !TICK_ONESHOT */ /* diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index d95721f33702..cbafed7d4f38 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) rwbs[i] = '\0'; } -void blk_fill_rwbs_rq(char *rwbs, struct request *rq) -{ - int rw = rq->cmd_flags & 0x03; - int bytes; - - if (rq->cmd_flags & REQ_DISCARD) - rw |= REQ_DISCARD; - - if (rq->cmd_flags & REQ_SECURE) - rw |= REQ_SECURE; - - bytes = blk_rq_bytes(rq); - - blk_fill_rwbs(rwbs, rw, bytes); -} - #endif /* CONFIG_EVENT_TRACING */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa6819..ee6578b578ad 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -79,7 +79,9 @@ enum { MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ - MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ + MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, + /* call for help after 10ms + (min two ticks) */ MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ CREATE_COOLDOWN = HZ, /* time to breath after fail */ TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ @@ -2047,6 +2049,15 @@ repeat: move_linked_works(work, scheduled, &n); process_scheduled_works(rescuer); + + /* + * Leave this gcwq. If keep_working() is %true, notify a + * regular worker; otherwise, we end up with 0 concurrency + * and stalling the execution. 
+ */ + if (keep_working(gcwq)) + wake_up_worker(gcwq); + spin_unlock_irq(&gcwq->lock); } @@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, */ spin_lock(&workqueue_lock); - if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) + if (workqueue_freezing && wq->flags & WQ_FREEZABLE) for_each_cwq_cpu(cpu, wq) get_cwq(cpu, wq)->max_active = 0; @@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) spin_lock_irq(&gcwq->lock); - if (!(wq->flags & WQ_FREEZEABLE) || + if (!(wq->flags & WQ_FREEZABLE) || !(gcwq->flags & GCWQ_FREEZING)) get_cwq(gcwq->cpu, wq)->max_active = max_active; @@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) * want to get it over with ASAP - spam rescuers, wake up as * many idlers as necessary and create new ones till the * worklist is empty. Note that if the gcwq is frozen, there - * may be frozen works in freezeable cwqs. Don't declare + * may be frozen works in freezable cwqs. Don't declare * completion while frozen. */ while (gcwq->nr_workers != gcwq->nr_idle || @@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); /** * freeze_workqueues_begin - begin freezing workqueues * - * Start freezing workqueues. After this function returns, all - * freezeable workqueues will queue new works to their frozen_works - * list instead of gcwq->worklist. + * Start freezing workqueues. After this function returns, all freezable + * workqueues will queue new works to their frozen_works list instead of + * gcwq->worklist. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (cwq && wq->flags & WQ_FREEZEABLE) + if (cwq && wq->flags & WQ_FREEZABLE) cwq->max_active = 0; } @@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) } /** - * freeze_workqueues_busy - are freezeable workqueues still busy? + * freeze_workqueues_busy - are freezable workqueues still busy? * * Check whether freezing is complete. This function must be called * between freeze_workqueues_begin() and thaw_workqueues(). @@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) * Grabs and releases workqueue_lock. * * RETURNS: - * %true if some freezeable workqueues are still busy. %false if - * freezing is complete. + * %true if some freezable workqueues are still busy. %false if freezing + * is complete. 
*/ bool freeze_workqueues_busy(void) { @@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZABLE)) continue; BUG_ON(cwq->nr_active < 0); @@ -3690,7 +3701,7 @@ void thaw_workqueues(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZABLE)) continue; /* restore max_active and repopulate worklist */ diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16ca..b8029a5583ff 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -35,6 +35,31 @@ void __list_add(struct list_head *new, } EXPORT_SYMBOL(__list_add); +void __list_del_entry(struct list_head *entry) +{ + struct list_head *prev, *next; + + prev = entry->prev; + next = entry->next; + + if (WARN(next == LIST_POISON1, + "list_del corruption, %p->next is LIST_POISON1 (%p)\n", + entry, LIST_POISON1) || + WARN(prev == LIST_POISON2, + "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", + entry, LIST_POISON2) || + WARN(prev->next != entry, + "list_del corruption. prev->next should be %p, " + "but was %p\n", entry, prev->next) || + WARN(next->prev != entry, + "list_del corruption. next->prev should be %p, " + "but was %p\n", entry, next->prev)) + return; + + __list_del(prev, next); +} +EXPORT_SYMBOL(__list_del_entry); + /** * list_del - deletes entry from list. * @entry: the element to delete from the list. @@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); */ void list_del(struct list_head *entry) { - WARN(entry->next == LIST_POISON1, - "list_del corruption, next is LIST_POISON1 (%p)\n", - LIST_POISON1); - WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, - "list_del corruption, prev is LIST_POISON2 (%p)\n", - LIST_POISON2); - WARN(entry->prev->next != entry, - "list_del corruption. prev->next should be %p, " - "but was %p\n", entry, entry->prev->next); - WARN(entry->next->prev != entry, - "list_del corruption. 
next->prev should be %p, " - "but was %p\n", entry, entry->next->prev); - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } diff --git a/lib/nlattr.c b/lib/nlattr.c index 5021cbc34411..ac09f2226dc7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; - for (i = 0; i < n; i++) { + for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_minlen[p->type]) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c47bbe11b804..93ca08b8a451 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* * Ensure that the address returned is DMA'ble */ - if (!dma_capable(dev, dev_addr, size)) - panic("map_single: bounce buffer is not DMA'ble"); + if (!dma_capable(dev, dev_addr, size)) { + swiotlb_tbl_unmap_single(dev, map, size, dir); + dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); + } return dev_addr; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3e29781ee762..113e35c47502 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag) static inline struct page *alloc_hugepage_vma(int defrag, struct vm_area_struct *vma, - unsigned long haddr) + unsigned long haddr, int nd) { return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), - HPAGE_PMD_ORDER, vma, haddr); + HPAGE_PMD_ORDER, vma, haddr, nd); } #ifndef CONFIG_NUMA @@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(khugepaged_enter(vma))) return VM_FAULT_OOM; page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), - vma, haddr); + vma, haddr, numa_node_id()); if (unlikely(!page)) goto out; if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { @@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, } for (i = 0; i < HPAGE_PMD_NR; i++) { - pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, - vma, address); + pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, + vma, address, page_to_nid(page)); if (unlikely(!pages[i] || mem_cgroup_newpage_charge(pages[i], mm, GFP_KERNEL))) { @@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), - vma, haddr); + vma, haddr, numa_node_id()); else new_page = NULL; @@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, - struct vm_area_struct *vma) + struct vm_area_struct *vma, + int node) { pgd_t *pgd; pud_t *pud; @@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm, #ifndef CONFIG_NUMA VM_BUG_ON(!*hpage); new_page = *hpage; + if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { + up_read(&mm->mmap_sem); + return; + } #else VM_BUG_ON(*hpage); /* @@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm, * mmap_sem in read mode is good idea also to allow greater * scalability. 
*/ - new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); + new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, + node); if (unlikely(!new_page)) { up_read(&mm->mmap_sem); *hpage = ERR_PTR(-ENOMEM); return; } -#endif if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { up_read(&mm->mmap_sem); put_page(new_page); return; } +#endif /* after allocating the hugepage upgrade to mmap_sem write mode */ up_read(&mm->mmap_sem); @@ -1919,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct page *page; unsigned long _address; spinlock_t *ptl; + int node = -1; VM_BUG_ON(address & ~HPAGE_PMD_MASK); @@ -1949,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, page = vm_normal_page(vma, _address, pteval); if (unlikely(!page)) goto out_unmap; + /* + * Chose the node of the first page. This could + * be more sophisticated and look at more pages, + * but isn't for now. + */ + if (node == -1) + node = page_to_nid(page); VM_BUG_ON(PageCompound(page)); if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) goto out_unmap; @@ -1965,7 +1979,7 @@ out_unmap: pte_unmap_unlock(pte, ptl); if (ret) /* collapse_huge_page will return with the mmap_sem released */ - collapse_huge_page(mm, address, hpage, vma); + collapse_huge_page(mm, address, hpage, vma, node); out: return ret; } diff --git a/mm/memory.c b/mm/memory.c index 8e8c18324863..5823698c2b71 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping, details.last_index = ULONG_MAX; details.i_mmap_lock = &mapping->i_mmap_lock; + mutex_lock(&mapping->unmap_mutex); spin_lock(&mapping->i_mmap_lock); /* Protect against endless unmapping loops */ @@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping, if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); spin_unlock(&mapping->i_mmap_lock); + mutex_unlock(&mapping->unmap_mutex); } EXPORT_SYMBOL(unmap_mapping_range); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 368fc9d23610..b53ec99f1428 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) } /* Return a zonelist indicated by gfp for node representing a mempolicy */ -static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) +static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, + int nd) { - int nd = numa_node_id(); - switch (policy->mode) { case MPOL_PREFERRED: if (!(policy->flags & MPOL_F_LOCAL)) @@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, zl = node_zonelist(interleave_nid(*mpol, vma, addr, huge_page_shift(hstate_vma(vma))), gfp_flags); } else { - zl = policy_zonelist(gfp_flags, *mpol); + zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); if ((*mpol)->mode == MPOL_BIND) *nodemask = &(*mpol)->v.nodes; } @@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr) + unsigned long addr, int node) { struct mempolicy *pol = get_vma_policy(current, vma, addr); struct zonelist *zl; @@ -1830,13 +1829,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, if (unlikely(pol->mode == MPOL_INTERLEAVE)) { unsigned nid; - nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); + nid = interleave_nid(pol, vma, addr, 
PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); put_mems_allowed(); return page; } - zl = policy_zonelist(gfp, pol); + zl = policy_zonelist(gfp, pol, node); if (unlikely(mpol_needs_cond_ref(pol))) { /* * slow path: ref counted shared policy @@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else page = __alloc_pages_nodemask(gfp, order, - policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); + policy_zonelist(gfp, pol, numa_node_id()), + policy_nodemask(gfp, pol)); put_mems_allowed(); return page; } diff --git a/mm/migrate.c b/mm/migrate.c index 766115253807..352de555626c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, return -EPERM; /* Find the mm_struct */ - read_lock(&tasklist_lock); + rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); return -ESRCH; } mm = get_task_mm(task); - read_unlock(&tasklist_lock); + rcu_read_unlock(); if (!mm) return -EINVAL; diff --git a/mm/mremap.c b/mm/mremap.c index 9925b6391b80..1de98d492ddc 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, */ mapping = vma->vm_file->f_mapping; spin_lock(&mapping->i_mmap_lock); - if (new_vma->vm_truncate_count && - new_vma->vm_truncate_count != vma->vm_truncate_count) - new_vma->vm_truncate_count = 0; + new_vma->vm_truncate_count = 0; } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a873e61e312e..cdef1d4b4e47 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count) for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { unsigned long check = pfn + iter; - if (!pfn_valid_within(check)) { - iter++; + if (!pfn_valid_within(check)) continue; - } + page = pfn_to_page(check); if (!page_count(page)) { if (PageBuddy(page)) diff --git a/mm/rmap.c b/mm/rmap.c index f21f4a1d6a1c..941bf82e8961 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; int referenced = 0; - /* - * Don't want to elevate referenced for mlocked page that gets this far, - * in order that it progresses to try_to_unmap and is moved to the - * unevictable list. - */ - if (vma->vm_flags & VM_LOCKED) { - *mapcount = 0; /* break early from loop */ - *vm_flags |= VM_LOCKED; - goto out; - } - - /* Pretend the page is referenced if the task has the - swap token and is in the middle of a page fault. */ - if (mm != current->mm && has_swap_token(mm) && - rwsem_is_locked(&mm->mmap_sem)) - referenced++; - if (unlikely(PageTransHuge(page))) { pmd_t *pmd; spin_lock(&mm->page_table_lock); + /* + * rmap might return false positives; we must filter + * these out using page_check_address_pmd(). 
+ */ pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_FLAG); - if (pmd && !pmd_trans_splitting(*pmd) && - pmdp_clear_flush_young_notify(vma, address, pmd)) + if (!pmd) { + spin_unlock(&mm->page_table_lock); + goto out; + } + + if (vma->vm_flags & VM_LOCKED) { + spin_unlock(&mm->page_table_lock); + *mapcount = 0; /* break early from loop */ + *vm_flags |= VM_LOCKED; + goto out; + } + + /* go ahead even if the pmd is pmd_trans_splitting() */ + if (pmdp_clear_flush_young_notify(vma, address, pmd)) referenced++; spin_unlock(&mm->page_table_lock); } else { pte_t *pte; spinlock_t *ptl; + /* + * rmap might return false positives; we must filter + * these out using page_check_address(). + */ pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) goto out; + if (vma->vm_flags & VM_LOCKED) { + pte_unmap_unlock(pte, ptl); + *mapcount = 0; /* break early from loop */ + *vm_flags |= VM_LOCKED; + goto out; + } + if (ptep_clear_flush_young_notify(vma, address, pte)) { /* * Don't treat a reference through a sequentially read @@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); } + /* Pretend the page is referenced if the task has the + swap token and is in the middle of a page fault. */ + if (mm != current->mm && has_swap_token(mm) && + rwsem_is_locked(&mm->mmap_sem)) + referenced++; + (*mapcount)--; if (referenced) diff --git a/mm/swapfile.c b/mm/swapfile.c index 07a458d72fa8..0341c5700e34 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -EINVAL; if (S_ISBLK(inode->i_mode)) { - bdev = I_BDEV(inode); + bdev = bdgrab(I_BDEV(inode)); error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, sys_swapon); if (error < 0) { diff --git a/mm/truncate.c b/mm/truncate.c index 49feb46e77b8..d64296be00d3 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping, next = start; while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; pgoff_t page_index = page->index; @@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping, unlock_page(page); } pagevec_release(&pvec); + mem_cgroup_uncharge_end(); cond_resched(); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 17497d0cd8b9..6771ea70bfe7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone, if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) return false; - /* - * If we failed to reclaim and have scanned the full list, stop. - * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far - * faster but obviously would be less likely to succeed - * allocation. If this is desirable, use GFP_REPEAT to decide - * if both reclaimed and scanned should be checked or just - * reclaimed - */ - if (!nr_reclaimed && !nr_scanned) - return false; + /* Consider stopping depending on scan and reclaim activity */ + if (sc->gfp_mask & __GFP_REPEAT) { + /* + * For __GFP_REPEAT allocations, stop reclaiming if the + * full LRU list has been scanned and we are still failing + * to reclaim pages. 
This full LRU scan is potentially + * expensive but a __GFP_REPEAT caller really wants to succeed + */ + if (!nr_reclaimed && !nr_scanned) + return false; + } else { + /* + * For non-__GFP_REPEAT allocations which can presumably + * fail without consequence, stop if we failed to reclaim + * any pages from the last SWAP_CLUSTER_MAX number of + * pages that were scanned. This will return to the + * caller faster at the risk reclaim/compaction and + * the resulting allocation attempt fails + */ + if (!nr_reclaimed) + return false; + } /* * If we have not reclaimed enough pages for compaction and the diff --git a/net/Makefile b/net/Makefile index a3330ebe2c53..a51d9465e628 100644 --- a/net/Makefile +++ b/net/Makefile @@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX) += unix/ -ifneq ($(CONFIG_IPV6),) -obj-y += ipv6/ -endif +obj-$(CONFIG_NET) += ipv6/ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7550abb0c96a..675614e38e14 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; + sk->sk_state = BT_DISCONN; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 2575c2db6404..d7b9af4703d0 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) break; } + tty_unlock(); schedule(); + tty_lock(); } set_current_state(TASK_RUNNING); remove_wait_queue(&dev->wait, &wait); diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig index 9190ae462cb4..6dee7bf648a9 100644 --- a/net/bridge/Kconfig +++ b/net/bridge/Kconfig @@ -6,6 +6,7 @@ config BRIDGE tristate "802.1d Ethernet Bridging" select LLC select STP + depends on IPV6 || IPV6=n ---help--- If you say Y here, then your Linux box will be able to act as an Ethernet bridge, which means that the different Ethernet segments it diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6f6d8e1b776f..88e4aa9cb1f9 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) if (is_multicast_ether_addr(dest)) { mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { - if ((mdst && !hlist_unhashed(&mdst->mglist)) || + if ((mdst && mdst->mglist) || br_multicast_is_router(br)) skb2 = skb; br_multicast_forward(mdst, skb, skb2); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f701a21acb34..030a002ff8ee 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -37,10 +37,9 @@ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static inline int ipv6_is_local_multicast(const struct in6_addr *addr) +static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { - if (ipv6_addr_is_multicast(addr) && - IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) + if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) return 1; return 0; } @@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data) if (!netif_running(br->dev) || timer_pending(&mp->timer)) goto out; - if 
(!hlist_unhashed(&mp->mglist)) - hlist_del_init(&mp->mglist); + mp->mglist = false; if (mp->ports) goto out; @@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br, del_timer(&p->query_timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); - if (!mp->ports && hlist_unhashed(&mp->mglist) && + if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); @@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); - ipv6_eth_mc_map(group, eth->h_dest); eth->h_proto = htons(ETH_P_IPV6); skb_put(skb, sizeof(*eth)); @@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, ip6h->payload_len = htons(8 + sizeof(*mldq)); ip6h->nexthdr = IPPROTO_HOPOPTS; ip6h->hop_limit = 1; - ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); + ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, + &ip6h->saddr); ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); + ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ @@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data) struct net_bridge *br = mp->br; spin_lock(&br->multicast_lock); - if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || + if (!netif_running(br->dev) || !mp->mglist || mp->queries_sent >= br->multicast_last_member_count) goto out; @@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + mp->mglist = true; mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; } @@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, { struct br_ip br_group; - if (ipv6_is_local_multicast(group)) + if (!ipv6_is_transient_multicast(group)) return 0; ipv6_addr_copy(&br_group.u.ip6, group); - br_group.proto = htons(ETH_P_IP); + br_group.proto = htons(ETH_P_IPV6); return br_multicast_add_group(br, port, &br_group); } @@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, nsrcs = skb_header_pointer(skb, len + offsetof(struct mld2_grec, - grec_mca), + grec_nsrcs), sizeof(_nsrcs), &_nsrcs); if (!nsrcs) return -EINVAL; if (!pskb_may_pull(skb, len + sizeof(*grec) + - sizeof(struct in6_addr) * (*nsrcs))) + sizeof(struct in6_addr) * ntohs(*nsrcs))) return -EINVAL; grec = (struct mld2_grec *)(skb->data + len); - len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); + len += sizeof(*grec) + + sizeof(struct in6_addr) * ntohs(*nsrcs); /* We treat these as MLDv1 reports for now. */ switch (grec->grec_type) { @@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, max_delay *= br->multicast_last_member_count; - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) @@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) - mod_timer(&mp->timer, now + max_delay); + mod_timer(&p->timer, now + max_delay); } out: @@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, goto out; max_delay *= br->multicast_last_member_count; - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? 
time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) @@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) - mod_timer(&mp->timer, now + max_delay); + mod_timer(&p->timer, now + max_delay); } out: @@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br, br->multicast_last_member_interval; if (!port) { - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { @@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, { struct br_ip br_group; - if (ipv6_is_local_multicast(group)) + if (!ipv6_is_transient_multicast(group)) return; ipv6_addr_copy(&br_group.u.ip6, group); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 84aac7734bfc..4e1b620b6be6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -84,13 +84,13 @@ struct net_bridge_port_group { struct net_bridge_mdb_entry { struct hlist_node hlist[2]; - struct hlist_node mglist; struct net_bridge *br; struct net_bridge_port_group __rcu *ports; struct rcu_head rcu; struct timer_list timer; struct timer_list query_timer; struct br_ip addr; + bool mglist; u32 queries_sent; }; @@ -238,7 +238,6 @@ struct net_bridge spinlock_t multicast_lock; struct net_bridge_mdb_htable __rcu *mdb; struct hlist_head router_list; - struct hlist_head mglist; struct timer_list multicast_router_timer; struct timer_list multicast_querier_timer; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index dff633d62e5b..05f357828a2f 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) { struct kvec iov = {buf, len}; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; - return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + if (r == -EAGAIN) + r = 0; + return r; } /* @@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, size_t kvlen, size_t len, int more) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; if (more) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ - return kernel_sendmsg(sock, &msg, iov, kvlen, len); + r = kernel_sendmsg(sock, &msg, iov, kvlen, len); + if (r == -EAGAIN) + r = 0; + return r; } @@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con) ceph_msg_put(con->out_msg); con->out_msg = NULL; } - con->out_keepalive_pending = false; con->in_seq = 0; con->in_seq_acked = 0; } @@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) (msg->pages || msg->pagelist || msg->bio || in_trail)) kunmap(page); + if (ret == -EAGAIN) + ret = 0; if (ret <= 0) goto out; @@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con) con->auth_retry); if (con->auth_retry == 2) { con->error_msg = "connect authorization failure"; - reset_connection(con); - set_bit(CLOSED, &con->state); return -1; } con->auth_retry = 1; @@ -1705,14 +1712,6 @@ more: /* open the socket first? */ if (con->sock == NULL) { - /* - * if we were STANDBY and are reconnecting _this_ - * connection, bump connect_seq now. Always bump - * global_seq. 
- */ - if (test_and_clear_bit(STANDBY, &con->state)) - con->connect_seq++; - prepare_write_banner(msgr, con); prepare_write_connect(msgr, con, 1); prepare_read_banner(con); @@ -1737,16 +1736,12 @@ more_kvec: if (con->out_skip) { ret = write_partial_skip(con); if (ret <= 0) - goto done; - if (ret < 0) { - dout("try_write write_partial_skip err %d\n", ret); - goto done; - } + goto out; } if (con->out_kvec_left) { ret = write_partial_kvec(con); if (ret <= 0) - goto done; + goto out; } /* msg pages? */ @@ -1761,11 +1756,11 @@ more_kvec: if (ret == 1) goto more_kvec; /* we need to send the footer, too! */ if (ret == 0) - goto done; + goto out; if (ret < 0) { dout("try_write write_partial_msg_pages err %d\n", ret); - goto done; + goto out; } } @@ -1789,10 +1784,9 @@ do_next: /* Nothing to do! */ clear_bit(WRITE_PENDING, &con->state); dout("try_write nothing else to write.\n"); -done: ret = 0; out: - dout("try_write done on %p\n", con); + dout("try_write done on %p ret %d\n", con, ret); return ret; } @@ -1821,19 +1815,17 @@ more: dout("try_read connecting\n"); ret = read_partial_banner(con); if (ret <= 0) - goto done; - if (process_banner(con) < 0) { - ret = -1; goto out; - } + ret = process_banner(con); + if (ret < 0) + goto out; } ret = read_partial_connect(con); if (ret <= 0) - goto done; - if (process_connect(con) < 0) { - ret = -1; goto out; - } + ret = process_connect(con); + if (ret < 0) + goto out; goto more; } @@ -1848,7 +1840,7 @@ more: dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); ret = ceph_tcp_recvmsg(con->sock, buf, skip); if (ret <= 0) - goto done; + goto out; con->in_base_pos += ret; if (con->in_base_pos) goto more; @@ -1859,7 +1851,7 @@ more: */ ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); if (ret <= 0) - goto done; + goto out; dout("try_read got tag %d\n", (int)con->in_tag); switch (con->in_tag) { case CEPH_MSGR_TAG_MSG: @@ -1870,7 +1862,7 @@ more: break; case CEPH_MSGR_TAG_CLOSE: set_bit(CLOSED, &con->state); /* fixme */ - goto done; + goto out; default: goto bad_tag; } @@ -1882,13 +1874,12 @@ more: case -EBADMSG: con->error_msg = "bad crc"; ret = -EIO; - goto out; + break; case -EIO: con->error_msg = "io error"; - goto out; - default: - goto done; + break; } + goto out; } if (con->in_tag == CEPH_MSGR_TAG_READY) goto more; @@ -1898,15 +1889,13 @@ more: if (con->in_tag == CEPH_MSGR_TAG_ACK) { ret = read_partial_ack(con); if (ret <= 0) - goto done; + goto out; process_ack(con); goto more; } -done: - ret = 0; out: - dout("try_read done on %p\n", con); + dout("try_read done on %p ret %d\n", con, ret); return ret; bad_tag: @@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work) work.work); mutex_lock(&con->mutex); + if (test_and_clear_bit(BACKOFF, &con->state)) { + dout("con_work %p backing off\n", con); + if (queue_delayed_work(ceph_msgr_wq, &con->work, + round_jiffies_relative(con->delay))) { + dout("con_work %p backoff %lu\n", con, con->delay); + mutex_unlock(&con->mutex); + return; + } else { + con->ops->put(con); + dout("con_work %p FAILED to back off %lu\n", con, + con->delay); + } + } + if (test_bit(STANDBY, &con->state)) { + dout("con_work %p STANDBY\n", con); + goto done; + } if (test_bit(CLOSED, &con->state)) { /* e.g. 
if we are replaced */ dout("con_work CLOSED\n"); con_close_socket(con); @@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con) /* Requeue anything that hasn't been acked */ list_splice_init(&con->out_sent, &con->out_queue); - /* If there are no messages in the queue, place the connection - * in a STANDBY state (i.e., don't try to reconnect just yet). */ - if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { - dout("fault setting STANDBY\n"); + /* If there are no messages queued or keepalive pending, place + * the connection in a STANDBY state */ + if (list_empty(&con->out_queue) && + !test_bit(KEEPALIVE_PENDING, &con->state)) { + dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); + clear_bit(WRITE_PENDING, &con->state); set_bit(STANDBY, &con->state); } else { /* retry after a delay. */ @@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con) con->delay = BASE_DELAY_INTERVAL; else if (con->delay < MAX_DELAY_INTERVAL) con->delay *= 2; - dout("fault queueing %p delay %lu\n", con, con->delay); con->ops->get(con); if (queue_delayed_work(ceph_msgr_wq, &con->work, - round_jiffies_relative(con->delay)) == 0) + round_jiffies_relative(con->delay))) { + dout("fault queued %p delay %lu\n", con, con->delay); + } else { con->ops->put(con); + dout("fault failed to queue %p delay %lu, backoff\n", + con, con->delay); + /* + * In many cases we see a socket state change + * while con_work is running and end up + * queuing (non-delayed) work, such that we + * can't backoff with a delay. Set a flag so + * that when con_work restarts we schedule the + * delay then. + */ + set_bit(BACKOFF, &con->state); + } } out_unlock: @@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr) } EXPORT_SYMBOL(ceph_messenger_destroy); +static void clear_standby(struct ceph_connection *con) +{ + /* come back from STANDBY? */ + if (test_and_clear_bit(STANDBY, &con->state)) { + mutex_lock(&con->mutex); + dout("clear_standby %p and ++connect_seq\n", con); + con->connect_seq++; + WARN_ON(test_bit(WRITE_PENDING, &con->state)); + WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); + mutex_unlock(&con->mutex); + } +} + /* * Queue up an outgoing message on the given connection. 
*/ @@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) /* if there wasn't anything waiting to send before, queue * new work */ + clear_standby(con); if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) queue_con(con); } @@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) */ void ceph_con_keepalive(struct ceph_connection *con) { + dout("con_keepalive %p\n", con); + clear_standby(con); if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && test_and_set_bit(WRITE_PENDING, &con->state) == 0) queue_con(con); diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 1a040e64c69f..cd9c21df87d1 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data, int num_pages, bool write_page) { struct page **pages; - int rc; + int got = 0; + int rc = 0; pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); if (!pages) return ERR_PTR(-ENOMEM); down_read(&current->mm->mmap_sem); - rc = get_user_pages(current, current->mm, (unsigned long)data, - num_pages, write_page, 0, pages, NULL); + while (got < num_pages) { + rc = get_user_pages(current, current->mm, + (unsigned long)data + ((unsigned long)got * PAGE_SIZE), + num_pages - got, write_page, 0, pages + got, NULL); + if (rc < 0) + break; + BUG_ON(rc == 0); + got += rc; + } up_read(&current->mm->mmap_sem); - if (rc < num_pages) + if (rc < 0) goto fail; return pages; fail: - ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); + ceph_put_page_vector(pages, got, false); return ERR_PTR(rc); } EXPORT_SYMBOL(ceph_get_direct_page_vector); diff --git a/net/core/dev.c b/net/core/dev.c index 8e726cb47ed7..6561021d22d1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change); void dev_load(struct net *net, const char *name) { struct net_device *dev; + int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); - if (!dev && capable(CAP_NET_ADMIN)) - request_module("%s", name); + no_module = !dev; + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", name); + if (no_module && capable(CAP_SYS_MODULE)) { + if (!request_module("%s", name)) + pr_err("Loading kernel module for a network device " +"with CAP_SYS_MODULE (deprecated). 
Use CAP_NET_ADMIN and alias netdev-%s " +"instead\n", name); + } } EXPORT_SYMBOL(dev_load); @@ -1280,10 +1288,13 @@ static int __dev_close_many(struct list_head *head) static int __dev_close(struct net_device *dev) { + int retval; LIST_HEAD(single); list_add(&dev->unreg_list, &single); - return __dev_close_many(&single); + retval = __dev_close_many(&single); + list_del(&single); + return retval; } int dev_close_many(struct list_head *head) @@ -1325,7 +1336,7 @@ int dev_close(struct net_device *dev) list_add(&dev->unreg_list, &single); dev_close_many(&single); - + list_del(&single); return 0; } EXPORT_SYMBOL(dev_close); @@ -5063,6 +5074,7 @@ static void rollback_registered(struct net_device *dev) list_add(&dev->unreg_list, &single); rollback_registered_many(&single); + list_del(&single); } unsigned long netdev_fix_features(unsigned long features, const char *name) @@ -6216,6 +6228,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) } } unregister_netdevice_many(&dev_kill_list); + list_del(&dev_kill_list); rtnl_unlock(); } diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 508f9c18992f..133fd22ea287 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, list_for_each_entry(ha, &from_list->list, list) { type = addr_type ? addr_type : ha->type; - __hw_addr_del(to_list, ha->addr, addr_len, addr_type); + __hw_addr_del(to_list, ha->addr, addr_len, type); } } EXPORT_SYMBOL(__hw_addr_del_multiple); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index a9e7fc4c461f..b5bada92f637 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) pkt_dev->started_at); ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); - p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", + p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", (unsigned long long)ktime_to_us(elapsed), (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), (unsigned long long)ktime_to_us(idle), diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 6b03f561caec..c44348adba3b 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, dcb->cmd = DCB_CMD_GAPP; app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); + if (!app_nest) + goto out_cancel; + ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; @@ -1190,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, goto err; } - if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); if (err) @@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp); u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type *itr; + struct dcb_app_type event; + + memcpy(&event.name, dev->name, sizeof(event.name)); + memcpy(&event.app, new, sizeof(event.app)); spin_lock(&dcb_lock); /* Search for existing match and replace */ @@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) } out: spin_unlock(&dcb_lock); - call_dcbevent_notifiers(DCB_APP_EVENT, new); + call_dcbevent_notifiers(DCB_APP_EVENT, &event); return 0; } EXPORT_SYMBOL(dcb_setapp); diff --git a/net/dccp/input.c b/net/dccp/input.c index 8cde009e8b85..4222e7a654b0 
100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Caller (dccp_v4_do_rcv) will send Reset */ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; return 1; + } else if (sk->sk_state == DCCP_CLOSED) { + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; + return 1; } if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { @@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } switch (sk->sk_state) { - case DCCP_CLOSED: - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; - return 1; - case DCCP_REQUESTING: queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); if (queued >= 0) diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 739435a6af39..cfa7a5e1c5c9 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) size_t result_len = 0; const char *data = _data, *end, *opt; - kenter("%%%d,%s,'%s',%zu", - key->serial, key->description, data, datalen); + kenter("%%%d,%s,'%*.*s',%zu", + key->serial, key->description, + (int)datalen, (int)datalen, data, datalen); if (datalen <= 1 || !data || data[datalen - 1] != '\0') return -EINVAL; @@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) seq_printf(m, ": %u", key->datalen); } +/* + * read the DNS data + * - the key's semaphore is read-locked + */ +static long dns_resolver_read(const struct key *key, + char __user *buffer, size_t buflen) +{ + if (key->type_data.x[0]) + return key->type_data.x[0]; + + return user_read(key, buffer, buflen); +} + struct key_type key_type_dns_resolver = { .name = "dns_resolver", .instantiate = dns_resolver_instantiate, @@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = { .revoke = user_revoke, .destroy = user_destroy, .describe = dns_resolver_describe, - .read = user_read, + .read = dns_resolver_read, }; static int __init init_dns_resolver(void) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 748cb5b337bd..036652c8166d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) ifap = &ifa->ifa_next) { if (!strcmp(ifr.ifr_name, ifa->ifa_label) && sin_orig.sin_addr.s_addr == - ifa->ifa_address) { + ifa->ifa_local) { break; /* found */ } } @@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) return mtu >= 68; } +static void inetdev_send_gratuitous_arp(struct net_device *dev, + struct in_device *in_dev) + +{ + struct in_ifaddr *ifa = in_dev->ifa_list; + + if (!ifa) + return; + + arp_send(ARPOP_REQUEST, ETH_P_ARP, + ifa->ifa_local, dev, + ifa->ifa_local, NULL, + dev->dev_addr, NULL); +} + /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, @@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, } ip_mc_up(in_dev); /* fall through */ - case NETDEV_NOTIFY_PEERS: case NETDEV_CHANGEADDR: + if (!IN_DEV_ARP_NOTIFY(in_dev)) + break; + /* fall through */ + case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */ - if (IN_DEV_ARP_NOTIFY(in_dev)) { - struct in_ifaddr *ifa = in_dev->ifa_list; - - if (ifa) - arp_send(ARPOP_REQUEST, ETH_P_ARP, - ifa->ifa_address, dev, - ifa->ifa_address, NULL, - dev->dev_addr, NULL); - } + inetdev_send_gratuitous_arp(dev, 
in_dev); break; case NETDEV_DOWN: ip_mc_down(in_dev); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c5af909cf701..3c8dfa16614d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -505,7 +505,9 @@ restart: } rcu_read_unlock(); + local_bh_disable(); inet_twsk_deschedule(tw, twdr); + local_bh_enable(); inet_twsk_put(tw); goto restart_rcu; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eb68a0e34e49..d1d0e2c256fc 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev .fl4_dst = dst, .fl4_src = tiph->saddr, .fl4_tos = RT_TOS(tos), + .proto = IPPROTO_GRE, .fl_gre_key = tunnel->parms.o_key }; if (ip_route_output_key(dev_net(dev), &rt, &fl)) { @@ -1764,4 +1765,4 @@ module_exit(ipgre_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("gre"); MODULE_ALIAS_RTNL_LINK("gretap"); -MODULE_ALIAS("gre0"); +MODULE_ALIAS_NETDEV("gre0"); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 988f52fba54a..a5f58e7cbb26 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -913,4 +913,4 @@ static void __exit ipip_fini(void) module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL"); -MODULE_ALIAS("tunl0"); +MODULE_ALIAS_NETDEV("tunl0"); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 788a3e74834e..6ed6603c2f6d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .destroy = ipv4_dst_destroy, .check = ipv4_blackhole_dst_check, .default_mtu = ipv4_blackhole_default_mtu, + .default_advmss = ipv4_default_advmss, .update_pmtu = ipv4_rt_blackhole_update_pmtu, }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb7f82ebf4a3..65f6c0406245 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, } /* D-SACK for already forgotten data... Do dumb counting. */ - if (dup_sack && + if (dup_sack && tp->undo_marker && tp->undo_retrans && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; @@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { - if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) + if (tp->undo_marker && tp->undo_retrans && + after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 406f320336e6..dfa5beb0c1c8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) if (!tp->retrans_stamp) tp->retrans_stamp = TCP_SKB_CB(skb)->when; - tp->undo_retrans++; + tp->undo_retrans += tcp_skb_pcount(skb); /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4f4483e697bd..e528a42a52be 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -57,6 +57,7 @@ MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETDEV("ip6tnl0"); #ifdef IP6_TNL_DEBUG #define IP6_TNL_TRACE(x...) 
printk(KERN_DEBUG "%s:" x "\n", __func__) diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 09c88891a753..de338037a736 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -410,7 +410,7 @@ fallback: if (p != NULL) { sb_add(m, "%02x", *p++); for (i = 1; i < len; i++) - sb_add(m, ":%02x", p[i]); + sb_add(m, ":%02x", *p++); } sb_add(m, " "); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1c29f95695de..e7db7014e89f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { .destroy = ip6_dst_destroy, .check = ip6_dst_check, .default_mtu = ip6_blackhole_default_mtu, + .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, }; @@ -738,8 +739,10 @@ restart: if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); - else + else if (!(rt->dst.flags & DST_HOST)) nrt = rt6_alloc_clone(rt, &fl->fl6_dst); + else + goto out2; dst_release(&rt->dst); rt = nrt ? : net->ipv6.ip6_null_entry; @@ -2556,14 +2559,16 @@ static int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; - int delay = net->ipv6.sysctl.flush_delay; - if (write) { - proc_dointvec(ctl, write, buffer, lenp, ppos); - fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); - return 0; - } else + struct net *net; + int delay; + if (!write) return -EINVAL; + + net = (struct net *)ctl->extra1; + delay = net->ipv6.sysctl.flush_delay; + proc_dointvec(ctl, write, buffer, lenp, ppos); + fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); + return 0; } ctl_table ipv6_route_table_template[] = { @@ -2650,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) if (table) { table[0].data = &net->ipv6.sysctl.flush_delay; + table[0].extra1 = net; table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 8ce38f10a547..d2c16e10f650 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1290,4 +1290,4 @@ static int __init sit_init(void) module_init(sit_init); module_exit(sit_cleanup); MODULE_LICENSE("GPL"); -MODULE_ALIAS("sit0"); +MODULE_ALIAS_NETDEV("sit0"); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8acba456744e..7a10a8d1b2d0 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); + list_del(&unreg_list); } static u32 ieee80211_idle_off(struct ieee80211_local *local, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 45fbb9e33746..c9ceb4d57ab0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, if (is_multicast_ether_addr(hdr->addr1)) return; + /* + * In case we receive frames after disassociation. 
+ */ + if (!sdata->u.mgd.associated) + return; + ieee80211_sta_reset_conn_monitor(sdata); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cf68700abffa..d036597aabbe 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: changed |= BSS_CHANGED_ASSOC; + mutex_lock(&sdata->u.mgd.mtx); ieee80211_bss_info_change_notify(sdata, changed); + mutex_unlock(&sdata->u.mgd.mtx); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 32fcbe290c04..4aa614b8a96a 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head, /* Optimization: we don't need to hold module reference here, since function can't sleep. --RR */ +repeat: verdict = elem->hook(hook, skb, indev, outdev, okfn); if (verdict != NF_ACCEPT) { #ifdef CONFIG_NETFILTER_DEBUG @@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head, #endif if (verdict != NF_REPEAT) return verdict; - *i = (*i)->prev; + goto repeat; } } return NF_ACCEPT; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 22f7ad5101ab..ba98e1308f3c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, dest->u_threshold = udest->u_threshold; dest->l_threshold = udest->l_threshold; - spin_lock(&dest->dst_lock); + spin_lock_bh(&dest->dst_lock); ip_vs_dst_reset(dest); - spin_unlock(&dest->dst_lock); + spin_unlock_bh(&dest->dst_lock); if (add) ip_vs_new_estimator(&dest->stats); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index b07393eab88e..91816998ed86 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); @@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(u_int8_t pf) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return; mutex_lock(&nf_log_mutex); rcu_assign_pointer(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c index 4d87befb04c0..474d621cbc2e 100644 --- a/net/netfilter/nf_tproxy_core.c +++ b/net/netfilter/nf_tproxy_core.c @@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb) skb->destructor = NULL; if (sk) - nf_tproxy_put_sock(sk); + sock_put(sk); } /* consumes sk */ -int +void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { - bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 
- inet_twsk(sk)->tw_transparent : - inet_sk(sk)->transparent; - - if (transparent) { - skb_orphan(skb); - skb->sk = sk; - skb->destructor = nf_tproxy_destructor; - return 1; - } else - nf_tproxy_put_sock(sk); - - return 0; + /* assigning tw sockets complicates things; most + * skb->sk->X checks would have to test sk->sk_state first */ + if (sk->sk_state == TCP_TIME_WAIT) { + inet_twsk_put(inet_twsk(sk)); + return; + } + + skb_orphan(skb); + skb->sk = sk; + skb->destructor = nf_tproxy_destructor; } EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 640678f47a2a..dcfd57eb9d02 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -33,6 +33,20 @@ #include <net/netfilter/nf_tproxy_core.h> #include <linux/netfilter/xt_TPROXY.h> +static bool tproxy_sk_is_transparent(struct sock *sk) +{ + if (sk->sk_state != TCP_TIME_WAIT) { + if (inet_sk(sk)->transparent) + return true; + sock_put(sk); + } else { + if (inet_twsk(sk)->tw_transparent) + return true; + inet_twsk_put(inet_twsk(sk)); + } + return false; +} + static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { @@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ - if (sk && nf_tproxy_assign_sock(skb, sk)) { + if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; @@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); + + nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } @@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ - if (sk && nf_tproxy_assign_sock(skb, sk)) { + if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; @@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); + + nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 00d6ae838303..9cc46356b577 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -35,6 +35,15 @@ #include <net/netfilter/nf_conntrack.h> #endif +static void +xt_socket_put_sk(struct sock *sk) +{ + if (sk->sk_state == TCP_TIME_WAIT) + inet_twsk_put(inet_twsk(sk)); + else + sock_put(sk); +} + static int extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol, @@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, (sk->sk_state == TCP_TIME_WAIT && inet_twsk(sk)->tw_transparent)); - nf_tproxy_put_sock(sk); + xt_socket_put_sk(sk); if (wildcard || !transparent) sk = NULL; @@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) (sk->sk_state == TCP_TIME_WAIT && inet_twsk(sk)->tw_transparent)); - nf_tproxy_put_sock(sk); + xt_socket_put_sk(sk); if (wildcard || !transparent) sk = NULL; diff --git 
a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 478181d53c55..1f924595bdef 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, int noblock = flags&MSG_DONTWAIT; size_t copied; struct sk_buff *skb, *data_skb; - int err; + int err, ret; if (flags&MSG_OOB) return -EOPNOTSUPP; @@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, skb_free_datagram(sk, skb); - if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) - netlink_dump(sk); + if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { + ret = netlink_dump(sk); + if (ret) { + sk->sk_err = ret; + sk->sk_error_report(sk); + } + } scm_recv(sock, msg, siocb->scm, flags); out: @@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, struct netlink_callback *cb; struct sock *sk; struct netlink_sock *nlk; + int ret; cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (cb == NULL) @@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, nlk->cb = cb; mutex_unlock(nlk->cb_mutex); - netlink_dump(sk); + ret = netlink_dump(sk); + sock_put(sk); + if (ret) + return ret; + /* We successfully started a dump, by returning -EINTR we * signal not to send ACK even if it was requested. */ diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 71f373c421bc..c47a511f203d 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */ diff --git a/net/rds/loop.c b/net/rds/loop.c index aeec1d483b17..bca6761a3ca2 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /* diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 89315009bab1..1a2b0633fece 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) goto protocol_error; } + case RXRPC_PACKET_TYPE_ACKALL: case RXRPC_PACKET_TYPE_ACK: /* ACK processing is done in process context */ read_lock_bh(&call->state_lock); diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353fe..d763793d39de 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key 
*key, const __be32 *xdr, return ret; plen -= sizeof(*token); - token = kmalloc(sizeof(*token), GFP_KERNEL); + token = kzalloc(sizeof(*token), GFP_KERNEL); if (!token) return -ENOMEM; - token->kad = kmalloc(plen, GFP_KERNEL); + token->kad = kzalloc(plen, GFP_KERNEL); if (!token->kad) { kfree(token); return -ENOMEM; @@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) goto error; ret = -ENOMEM; - token = kmalloc(sizeof(*token), GFP_KERNEL); + token = kzalloc(sizeof(*token), GFP_KERNEL); if (!token) goto error; - token->kad = kmalloc(plen, GFP_KERNEL); + token->kad = kzalloc(plen, GFP_KERNEL); if (!token->kad) goto error_free; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 34dc598440a2..1bc698039ae2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev) list_add(&dev->unreg_list, &single); dev_deactivate_many(&single); + list_del(&single); } static void dev_init_scheduler_queue(struct net_device *dev, diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2cc46f0962ca..b23428f3c0dd 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { - sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, - WORD_ROUND(ntohs(param.p->length))); - sctp_addto_chunk_fixed(*errp, - WORD_ROUND(ntohs(param.p->length)), - param.v); + if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, + WORD_ROUND(ntohs(param.p->length)))) + sctp_addto_chunk_fixed(*errp, + WORD_ROUND(ntohs(param.p->length)), + param.v); } else { /* If there is no memory for generating the ERROR * report as specified, an ABORT will be triggered diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 243fc09b164e..59e599498e37 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task) /* * Mark an RPC call as having completed by clearing the 'active' bit + * and then waking up all tasks that were sleeping. */ -static void rpc_mark_complete_task(struct rpc_task *task) +static int rpc_complete_task(struct rpc_task *task) { - smp_mb__before_clear_bit(); + void *m = &task->tk_runstate; + wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); + struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); + unsigned long flags; + int ret; + + spin_lock_irqsave(&wq->lock, flags); clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); - smp_mb__after_clear_bit(); - wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); + ret = atomic_dec_and_test(&task->tk_count); + if (waitqueue_active(wq)) + __wake_up_locked_key(wq, TASK_NORMAL, &k); + spin_unlock_irqrestore(&wq->lock, flags); + return ret; } /* * Allow callers to wait for completion of an RPC call + * + * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() + * to enforce taking of the wq->lock and hence avoid races with + * rpc_complete_task(). 
*/ int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) { if (action == NULL) action = rpc_wait_bit_killable; - return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, + return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, action, TASK_KILLABLE); } EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); @@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work) rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); } -void rpc_put_task(struct rpc_task *task) +static void rpc_release_resources_task(struct rpc_task *task) { - if (!atomic_dec_and_test(&task->tk_count)) - return; - /* Release resources */ if (task->tk_rqstp) xprt_release(task); if (task->tk_msg.rpc_cred) put_rpccred(task->tk_msg.rpc_cred); rpc_task_release_client(task); - if (task->tk_workqueue != NULL) { +} + +static void rpc_final_put_task(struct rpc_task *task, + struct workqueue_struct *q) +{ + if (q != NULL) { INIT_WORK(&task->u.tk_work, rpc_async_release); - queue_work(task->tk_workqueue, &task->u.tk_work); + queue_work(q, &task->u.tk_work); } else rpc_free_task(task); } + +static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) +{ + if (atomic_dec_and_test(&task->tk_count)) { + rpc_release_resources_task(task); + rpc_final_put_task(task, q); + } +} + +void rpc_put_task(struct rpc_task *task) +{ + rpc_do_put_task(task, NULL); +} EXPORT_SYMBOL_GPL(rpc_put_task); +void rpc_put_task_async(struct rpc_task *task) +{ + rpc_do_put_task(task, task->tk_workqueue); +} +EXPORT_SYMBOL_GPL(rpc_put_task_async); + static void rpc_release_task(struct rpc_task *task) { dprintk("RPC: %5u release task\n", task->tk_pid); BUG_ON (RPC_IS_QUEUED(task)); - /* Wake up anyone who is waiting for task completion */ - rpc_mark_complete_task(task); + rpc_release_resources_task(task); - rpc_put_task(task); + /* + * Note: at this point we have been removed from rpc_clnt->cl_tasks, + * so it should be safe to use task->tk_count as a test for whether + * or not any other processes still hold references to our rpc_task. 
+ */ + if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { + /* Wake up anyone who may be waiting for task completion */ + if (!rpc_complete_task(task)) + return; + } else { + if (!atomic_dec_and_test(&task->tk_count)) + return; + } + rpc_final_put_task(task, task->tk_workqueue); } int rpciod_up(void) diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 9df1eadc912a..1a10dcd999ea 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, p, 0, length, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { put_page(p); + svc_rdma_put_context(ctxt, 1); return; } atomic_inc(&xprt->sc_dma_used); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c431f5a57960..be96d429b475 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, } xs_reclassify_socket(family, sock); - if (xs_bind(transport, sock)) { + err = xs_bind(transport, sock); + if (err) { sock_release(sock); goto out; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index dd419d286204..437a99e560e1 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_namelen = 0; - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) { + err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); + goto out; + } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { @@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, memset(&tmp_scm, 0, sizeof(tmp_scm)); } - mutex_lock(&u->readlock); + err = mutex_lock_interruptible(&u->readlock); + if (err) { + err = sock_intr_errno(timeo); + goto out; + } do { int chunk; @@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, timeo = unix_stream_data_wait(sk, timeo); - if (signal_pending(current)) { + if (signal_pending(current) + || mutex_lock_interruptible(&u->readlock)) { err = sock_intr_errno(timeo); goto out; } - mutex_lock(&u->readlock); + continue; unlock: unix_state_unlock(sk); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd5..d112f038edf0 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev, return freq; if (freq == 0) return -EINVAL; - wdev_lock(wdev); mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); - mutex_unlock(&rdev->devlist_mtx); wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); return err; default: return -EOPNOTSUPP; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8b3ef404c794..6459588befc3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) default: BUG(); } - xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); + xdst = dst_alloc(dst_ops); xfrm_policy_put_afinfo(afinfo); - xdst->flo.ops = &xfrm_bundle_fc_ops; + if (likely(xdst)) + xdst->flo.ops = &xfrm_bundle_fc_ops; + else + xdst = ERR_PTR(-ENOBUFS); return xdst; } diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index c9a16abacab4..291228e25984 100644 --- 
a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c @@ -309,12 +309,18 @@ static void do_config_file(const char *filename) close(fd); } +/* + * Important: The below generated source_foo.o and deps_foo.o variable + * assignments are parsed not only by make, but also by the rather simple + * parser in scripts/mod/sumversion.c. + */ static void parse_dep_file(void *map, size_t len) { char *m = map; char *end = m + len; char *p; char s[PATH_MAX]; + int first; p = strchr(m, ':'); if (!p) { @@ -322,11 +328,11 @@ static void parse_dep_file(void *map, size_t len) exit(1); } memcpy(s, m, p-m); s[p-m] = 0; - printf("deps_%s := \\\n", target); m = p+1; clear_config(); + first = 1; while (m < end) { while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) m++; @@ -340,9 +346,20 @@ static void parse_dep_file(void *map, size_t len) if (strrcmp(s, "include/generated/autoconf.h") && strrcmp(s, "arch/um/include/uml-config.h") && strrcmp(s, ".ver")) { - printf(" %s \\\n", s); + /* + * Do not list the source file as dependency, so that + * kbuild is not confused if a .c file is rewritten + * into .S or vice versa. Storing it in source_* is + * needed for modpost to compute srcversions. + */ + if (first) { + printf("source_%s := %s\n\n", target, s); + printf("deps_%s := \\\n", target); + } else + printf(" %s \\\n", s); do_config_file(s); } + first = 0; m = p + 1; } printf("\n%s: $(deps_%s)\n\n", target, target); diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c index ecf9c7dc1825..9dfcd6d988da 100644 --- a/scripts/mod/sumversion.c +++ b/scripts/mod/sumversion.c @@ -300,8 +300,8 @@ static int is_static_library(const char *objfile) return 0; } -/* We have dir/file.o. Open dir/.file.o.cmd, look for deps_ line to - * figure out source file. */ +/* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line + * to figure out source files. 
*/ static int parse_source_files(const char *objfile, struct md4_ctx *md) { char *cmd, *file, *line, *dir; @@ -340,6 +340,21 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md) */ while ((line = get_next_line(&pos, file, flen)) != NULL) { char* p = line; + + if (strncmp(line, "source_", sizeof("source_")-1) == 0) { + p = strrchr(line, ' '); + if (!p) { + warn("malformed line: %s\n", line); + goto out_file; + } + p++; + if (!parse_file(p, md)) { + warn("could not open %s: %s\n", + p, strerror(errno)); + goto out_file; + } + continue; + } if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { check_files = 1; continue; diff --git a/sound/core/jack.c b/sound/core/jack.c index 4902ae568730..53b53e97c896 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, fail_input: input_free_device(jack->input_dev); + kfree(jack->id); kfree(jack); return err; } diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 23f49f356e0f..16c0bdfbb164 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; - int temp; + int temp, page, delta; temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); - temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); - return temp; + page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; + if (dma->nr_periods >= 4) + delta = (page - dma->period_real) & 3; + else { + delta = (page - dma->period_real); + if (delta < 0) + delta += dma->nr_periods; + } + return (dma->period_virt + delta) * dma->period_bytes + + (temp & (dma->period_bytes - 1)); } static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0baffcdee8f9..fcedad9a5fef 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index a07b031090d8..067982f4f182 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = { {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, {0x11, AC_VERB_SET_PROC_STATE, 0x00}, +#if 0 /* Don't to set to D3 as we are in power-up sequence */ {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ +#endif {} /* terminator */ }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fbe97d32140d..4d5004e693f0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 
1014", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), @@ -3410,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec) } } spec->multiout.dac_nids = spec->private_dac_nids; - spec->multiout.max_channels = nums * 2; + spec->multiout.max_channels = spec->multiout.num_dacs * 2; if (cfg->hp_outs > 0) spec->auto_mute = 1; @@ -3729,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec) return 0; } -static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, +static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename, const char *dir, int cidx, - hda_nid_t nid, int hda_dir) + hda_nid_t nid, int hda_dir, int amp_idx) { static char name[32]; static struct snd_kcontrol_new knew[] = { @@ -3743,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, for (i = 0; i < 2; i++) { struct snd_kcontrol *kctl; - knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); + knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx, + hda_dir); knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; knew[i].index = cidx; snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); @@ -3759,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, return 0; } +#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \ + cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0) + #define cx_auto_add_pb_volume(codec, nid, str, idx) \ cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) @@ -3808,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec) struct conexant_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; static const char *prev_label; - int i, err, cidx; + int i, err, cidx, conn_len; + hda_nid_t conn[HDA_MAX_CONNECTIONS]; + + int multi_adc_volume = 0; /* If the ADC nid has several input volumes */ + int adc_nid = spec->adc_nids[0]; + + conn_len = snd_hda_get_connections(codec, adc_nid, conn, + HDA_MAX_CONNECTIONS); + if (conn_len < 0) + return conn_len; + + multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1; + if (!multi_adc_volume) { + err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid, + HDA_INPUT); + if (err < 0) + return err; + } - err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0], - HDA_INPUT); - if (err < 0) - return err; prev_label = NULL; cidx = 0; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; const char *label; - if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) + int j; + int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP; + if (!pin_amp && !multi_adc_volume) continue; + label = hda_get_autocfg_input_label(codec, cfg, i); if (label == prev_label) cidx++; else cidx = 0; prev_label = label; - err = cx_auto_add_volume(codec, label, " Capture", cidx, - nid, HDA_INPUT); - if (err < 0) - return err; + + if (pin_amp) { + err = cx_auto_add_volume(codec, label, " Boost", cidx, + nid, HDA_INPUT); + if (err < 0) + return err; + } + + if (!multi_adc_volume) + continue; + for (j = 0; j < conn_len; j++) { + if (conn[j] == nid) { + err = 
cx_auto_add_volume_idx(codec, label, + " Capture", cidx, adc_nid, HDA_INPUT, j); + if (err < 0) + return err; + break; + } + } } return 0; } @@ -3902,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = { .patch = patch_cxt5066 }, { .id = 0x14f15069, .name = "CX20585", .patch = patch_cxt5066 }, + { .id = 0x14f1506e, .name = "CX20590", + .patch = patch_cxt5066 }, { .id = 0x14f15097, .name = "CX20631", .patch = patch_conexant_auto }, { .id = 0x14f15098, .name = "CX20632", @@ -3928,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066"); MODULE_ALIAS("snd-hda-codec-id:14f15067"); MODULE_ALIAS("snd-hda-codec-id:14f15068"); MODULE_ALIAS("snd-hda-codec-id:14f15069"); +MODULE_ALIAS("snd-hda-codec-id:14f1506e"); MODULE_ALIAS("snd-hda-codec-id:14f15097"); MODULE_ALIAS("snd-hda-codec-id:14f15098"); MODULE_ALIAS("snd-hda-codec-id:14f150a1"); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index a58767736727..ec0fa2dd0a27 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, +{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, +{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, +/* 17 is known to be absent */ { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, @@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011"); MODULE_ALIAS("snd-hda-codec-id:10de0012"); MODULE_ALIAS("snd-hda-codec-id:10de0013"); MODULE_ALIAS("snd-hda-codec-id:10de0014"); +MODULE_ALIAS("snd-hda-codec-id:10de0015"); +MODULE_ALIAS("snd-hda-codec-id:10de0016"); MODULE_ALIAS("snd-hda-codec-id:10de0018"); MODULE_ALIAS("snd-hda-codec-id:10de0019"); MODULE_ALIAS("snd-hda-codec-id:10de001a"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3328a259a242..4261bb8eec1d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl) nid = spec->autocfg.hp_pins[i]; if (!nid) break; - if (snd_hda_jack_detect(codec, nid)) { - spec->jack_present = 1; - break; - } - alc_report_jack(codec, spec->autocfg.hp_pins[i]); + alc_report_jack(codec, nid); + spec->jack_present |= snd_hda_jack_detect(codec, nid); } mute = spec->jack_present ? 
HDA_AMP_MUTE : 0; @@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), - SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC), + SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC), SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9ea48b425d0b..bd7b123f6440 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = { 0x0f, 0x10, 0x11, 0x1f, 0x20, }; -static hda_nid_t stac92hd88xxx_pin_nids[10] = { +static hda_nid_t stac92hd87xxx_pin_nids[6] = { + 0x0a, 0x0b, 0x0c, 0x0d, + 0x0f, 0x11, +}; + +static hda_nid_t stac92hd88xxx_pin_nids[8] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0f, 0x11, 0x1f, 0x20, }; @@ -5430,12 +5435,13 @@ again: switch (codec->vendor_id) { case 0x111d76d1: case 0x111d76d9: + case 0x111d76e5: spec->dmic_nids = stac92hd87b_dmic_nids; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd87b_dmic_nids, STAC92HD87B_NUM_DMICS); - spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); - spec->pin_nids = stac92hd88xxx_pin_nids; + spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids); + spec->pin_nids = stac92hd87xxx_pin_nids; spec->mono_nid = 0; spec->num_pwrs = 0; break; @@ -5443,6 +5449,7 @@ again: case 0x111d7667: case 0x111d7668: case 0x111d7669: + case 0x111d76e3: spec->num_dmics = stac92xx_connected_ports(codec, stac92hd88xxx_dmic_nids, STAC92HD88XXX_NUM_DMICS); @@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, + { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, + { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, {} /* terminator */ }; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a76c3260d941..63b0054200a8 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec) hda_nid_t nid = cfg->inputs[i].pin; if (spec->smart51_enabled && is_smart51_pins(spec, nid)) ctl = PIN_OUT; - else if (i == AUTO_PIN_MIC) + else if (cfg->inputs[i].type == AUTO_PIN_MIC) ctl = PIN_VREF50; else ctl = PIN_IN; diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index bb4bf65b9e7e..0bb424af956f 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c @@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec) return 0; } -static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC; +static const u8 cx20442_reg; static struct snd_soc_codec_driver cx20442_codec_dev = { .probe = cx20442_codec_probe, diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 987476a5895f..017d99ceb42e 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, WM8903_MICDET_EINT | 
WM8903_MICSHRT_EINT, irq_mask); - if (det && shrt) { + if (det || shrt) { /* Enable mic detection, this may not have been set through * platform data (eg, if the defaults are OK). */ snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h index e8490f3edd03..e3ec2433b215 100644 --- a/sound/soc/codecs/wm8903.h +++ b/sound/soc/codecs/wm8903.h @@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, #define WM8903_VMID_RES_50K 2 #define WM8903_VMID_RES_250K 3 -#define WM8903_VMID_RES_5K 4 +#define WM8903_VMID_RES_5K 6 /* * R8 (0x08) - Analogue DAC 0 diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 4bbc3442703f..8dfb0a0da673 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c @@ -145,18 +145,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { SOC_SINGLE("DAC Playback Limiter Threshold", WM8978_DAC_LIMITER_2, 4, 7, 0), SOC_SINGLE("DAC Playback Limiter Boost", - WM8978_DAC_LIMITER_2, 0, 15, 0), + WM8978_DAC_LIMITER_2, 0, 12, 0), SOC_ENUM("ALC Enable Switch", alc1), SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0), - SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0), + SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0), SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0), SOC_ENUM("ALC Capture Mode", alc3), - SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0), - SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0), + SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0), + SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0), SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0), SOC_SINGLE("ALC Capture Noise Gate Threshold", @@ -211,8 +211,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1), /* DAC / ADC oversampling */ - SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0), - SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0), + SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, + 5, 1, 0), + SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, + 5, 1, 0), }; /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */ diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 37b8aa8a680f..c6c958ee5d59 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -107,6 +107,12 @@ struct wm8994_priv { int revision; struct wm8994_pdata *pdata; + + unsigned int aif1clk_enable:1; + unsigned int aif2clk_enable:1; + + unsigned int aif1clk_disable:1; + unsigned int aif2clk_disable:1; }; static int wm8994_readable(unsigned int reg) @@ -1004,6 +1010,110 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) } } +static int late_enable_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + if (wm8994->aif1clk_enable) { + snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, + WM8994_AIF1CLK_ENA_MASK, + WM8994_AIF1CLK_ENA); + wm8994->aif1clk_enable = 0; + } + if (wm8994->aif2clk_enable) { + snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, + WM8994_AIF2CLK_ENA_MASK, + WM8994_AIF2CLK_ENA); + 
wm8994->aif2clk_enable = 0; + } + break; + } + + return 0; +} + +static int late_disable_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_POST_PMD: + if (wm8994->aif1clk_disable) { + snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, + WM8994_AIF1CLK_ENA_MASK, 0); + wm8994->aif1clk_disable = 0; + } + if (wm8994->aif2clk_disable) { + snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, + WM8994_AIF2CLK_ENA_MASK, 0); + wm8994->aif2clk_disable = 0; + } + break; + } + + return 0; +} + +static int aif1clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif1clk_enable = 1; + break; + case SND_SOC_DAPM_POST_PMD: + wm8994->aif1clk_disable = 1; + break; + } + + return 0; +} + +static int aif2clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif2clk_enable = 1; + break; + case SND_SOC_DAPM_POST_PMD: + wm8994->aif2clk_disable = 1; + break; + } + + return 0; +} + +static int adc_mux_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + late_enable_ev(w, kcontrol, event); + return 0; +} + +static int dac_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + unsigned int mask = 1 << w->shift; + + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + mask, mask); + return 0; +} + static const char *hp_mux_text[] = { "Mixer", "DAC", @@ -1272,6 +1382,59 @@ static const struct soc_enum aif2dacr_src_enum = static const struct snd_kcontrol_new aif2dacr_src_mux = SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); +static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = { +SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + +SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), + +SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) +}; + +static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { +SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0) +}; + +static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = { +SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0, + dac_ev, 
SND_SOC_DAPM_PRE_PMU), +}; + +static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = { +SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), +SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), +SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), +SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), +}; + +static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { +SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), +}; + +static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { +SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), +SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), +}; + static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { SND_SOC_DAPM_INPUT("DMIC1DAT"), SND_SOC_DAPM_INPUT("DMIC2DAT"), @@ -1284,9 +1447,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0), - SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, 0, WM8994_POWER_MANAGEMENT_4, 9, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, @@ -1369,14 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0), SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), -SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), -SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), - -SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), -SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), -SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), -SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), - SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), @@ -1516,14 +1668,12 @@ static const struct snd_soc_dapm_route intercon[] = { { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, /* DAC1 inputs */ - { "DAC1L", NULL, "DAC1L Mixer" }, { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC1R", NULL, "DAC1R Mixer" }, { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, @@ -1532,7 +1682,6 @@ static const struct snd_soc_dapm_route intercon[] = { /* DAC2/AIF2 outputs */ { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, - { "DAC2L", NULL, "AIF2DAC2L Mixer" }, { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, @@ -1540,7 +1689,6 @@ static const struct snd_soc_dapm_route intercon[] = { { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, - { "DAC2R", NULL, "AIF2DAC2R Mixer" }, { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, 
{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, @@ -1584,6 +1732,24 @@ static const struct snd_soc_dapm_route intercon[] = { { "Right Headphone Mux", "DAC", "DAC1R" }, }; +static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = { + { "DAC1L", NULL, "Late DAC1L Enable PGA" }, + { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" }, + { "DAC1R", NULL, "Late DAC1R Enable PGA" }, + { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" }, + { "DAC2L", NULL, "Late DAC2L Enable PGA" }, + { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" }, + { "DAC2R", NULL, "Late DAC2R Enable PGA" }, + { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" } +}; + +static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = { + { "DAC1L", NULL, "DAC1L Mixer" }, + { "DAC1R", NULL, "DAC1R Mixer" }, + { "DAC2L", NULL, "AIF2DAC2L Mixer" }, + { "DAC2R", NULL, "AIF2DAC2R Mixer" }, +}; + static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { { "AIF1DACDAT", NULL, "AIF2DACDAT" }, { "AIF2DACDAT", NULL, "AIF1DACDAT" }, @@ -2514,6 +2680,22 @@ static int wm8994_resume(struct snd_soc_codec *codec) { struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int i, ret; + unsigned int val, mask; + + if (wm8994->revision < 4) { + /* force a HW read */ + val = wm8994_reg_read(codec->control_data, + WM8994_POWER_MANAGEMENT_5); + + /* modify the cache only */ + codec->cache_only = 1; + mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA | + WM8994_DAC2R_ENA | WM8994_DAC2L_ENA; + val &= mask; + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + mask, val); + codec->cache_only = 0; + } /* Restore the registers */ ret = snd_soc_cache_sync(codec); @@ -2847,11 +3029,10 @@ static void wm8958_default_micdet(u16 status, void *data) report |= SND_JACK_BTN_5; done: - snd_soc_jack_report(wm8994->micdet[0].jack, + snd_soc_jack_report(wm8994->micdet[0].jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | - SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, - report); + SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT); } /** @@ -3125,10 +3306,31 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) case WM8994: snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, ARRAY_SIZE(wm8994_specific_dapm_widgets)); + if (wm8994->revision < 4) { + snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, + ARRAY_SIZE(wm8994_lateclk_revd_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets, + ARRAY_SIZE(wm8994_adc_revd_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, + ARRAY_SIZE(wm8994_dac_revd_widgets)); + } else { + snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, + ARRAY_SIZE(wm8994_lateclk_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, + ARRAY_SIZE(wm8994_adc_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, + ARRAY_SIZE(wm8994_dac_widgets)); + } break; case WM8958: snd_soc_add_controls(codec, wm8958_snd_controls, ARRAY_SIZE(wm8958_snd_controls)); + snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, + ARRAY_SIZE(wm8994_lateclk_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, + ARRAY_SIZE(wm8994_adc_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, + ARRAY_SIZE(wm8994_dac_widgets)); snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets, ARRAY_SIZE(wm8958_dapm_widgets)); break; @@ -3143,12 +3345,19 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) snd_soc_dapm_add_routes(dapm, wm8994_intercon, ARRAY_SIZE(wm8994_intercon)); - if 
(wm8994->revision < 4) + if (wm8994->revision < 4) { snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, ARRAY_SIZE(wm8994_revd_intercon)); - + snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, + ARRAY_SIZE(wm8994_lateclk_revd_intercon)); + } else { + snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, + ARRAY_SIZE(wm8994_lateclk_intercon)); + } break; case WM8958: + snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, + ARRAY_SIZE(wm8994_lateclk_intercon)); snd_soc_dapm_add_routes(dapm, wm8958_intercon, ARRAY_SIZE(wm8958_intercon)); break; diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index 43825b2102a5..cce704c275c6 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c @@ -15,6 +15,7 @@ #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> +#include <linux/device.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> @@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c, wm9081->control_type = SND_SOC_I2C; wm9081->control_data = i2c; + if (dev_get_platdata(&i2c->dev)) + memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev), + sizeof(wm9081->retune)); + ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm9081, &wm9081_dai, 1); if (ret < 0) diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 613df5db0b32..516892706063 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"), }; static const struct snd_soc_dapm_route analogue_routes[] = { + { "MICBIAS1", NULL, "CLK_SYS" }, + { "MICBIAS2", NULL, "CLK_SYS" }, + { "IN1L PGA", "IN1LP Switch", "IN1LP" }, { "IN1L PGA", "IN1LN Switch", "IN1LN" }, diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index e20c9e1457c0..1e9bccae4e80 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c @@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .codec_dai_name = "tlv320aic23-hifi", - .platform_name = "imx-pcm-audio.0", + .platform_name = "imx-fiq-pcm-audio.0", .codec_name = "tlv320aic23-codec.0-001a", .cpu_dai_name = "imx-ssi.0", .ops = &eukrea_tlv320_snd_ops, diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c index 161750443ebc..73dde4a1adc3 100644 --- a/sound/soc/omap/am3517evm.c +++ b/sound/soc/omap/am3517evm.c @@ -139,7 +139,7 @@ static struct snd_soc_dai_link am3517evm_dai = { .cpu_dai_name ="omap-mcbsp-dai.0", .codec_dai_name = "tlv320aic23-hifi", .platform_name = "omap-pcm-audio", - .codec_name = "tlv320aic23-codec", + .codec_name = "tlv320aic23-codec.2-001a", .init = am3517evm_aic23_init, .ops = &am3517evm_ops, }; diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c index 28333e7d9c50..dc65650a6fa1 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c @@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", @@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", diff --git a/sound/soc/pxa/e750_wm9705.c 
b/sound/soc/pxa/e750_wm9705.c index 01bf31675c55..51897fcd911b 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c @@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", @@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index c6a37c6ef23b..053ed208e59f 100644 --- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c @@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c index fc22e6eefc98..b13a4252812d 100644 --- a/sound/soc/pxa/em-x270.c +++ b/sound/soc/pxa/em-x270.c @@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index 0d70fc8c12bd..38ca6759907e 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c @@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9713-hifi", .codec_name = "wm9713-codec", .init = mioa701_wm9713_init, @@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9713-aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index 857db96d4a4f..504e4004f004 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c @@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { { .name = "AC97 HiFi", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .codec_name = "wm9712-codec", .platform_name = "pxa-pcm-audio", @@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9712-aux", 
.codec_name = "wm9712-codec", .platform_name = "pxa-pcm-audio", diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index f75804ef0897..4b6e5d608b42 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c @@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b222a7d72027..25bba108fea3 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c @@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { .stream_name = "AC97 HiFi", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_name = "wm9713-hifi", .init = zylonite_wm9713_init, }, @@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { .stream_name = "AC97 Aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_name = "wm9713-aux", }, { diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 8194f150bab7..1790f83ee665 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) !path->connected(path->source, path->sink)) continue; - if (path->sink && path->sink->power_check && + if (!path->sink) + continue; + + if (path->sink->force) { + power = 1; + break; + } + + if (path->sink->power_check && path->sink->power_check(path->sink)) { power = 1; break; @@ -933,7 +941,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm, } if (!list_empty(&pending)) - dapm_seq_run_coalesced(dapm, &pending); + dapm_seq_run_coalesced(cur_dapm, &pending); } static void dapm_widget_update(struct snd_soc_dapm_context *dapm) @@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w; + unsigned int val; list_for_each_entry(w, &dapm->card->widgets, list) { @@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) case snd_soc_dapm_post: break; } + + /* Read the initial power state from the device */ + if (w->reg >= 0) { + val = snd_soc_read(w->codec, w->reg); + val &= 1 << w->shift; + if (w->invert) + val = !val; + + if (val) + w->power = 1; + } + w->new = 1; } diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 68b97477577b..66eabafb1c24 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) } dev->pcm->private_data = dev; - strcpy(dev->pcm->name, dev->product_name); + strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c77fff2..a1a47088fd0c 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c @@ -136,7 +136,7 @@ int 
snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) if (ret < 0) return ret; - strcpy(rmidi->name, device->product_name); + strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = device; diff --git a/sound/usb/card.c b/sound/usb/card.c index 800f7cb4f251..c0f8270bc199 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, return -ENOMEM; } + mutex_init(&chip->shutdown_mutex); chip->index = idx; chip->dev = dev; chip->card = card; @@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) chip = ptr; card = chip->card; mutex_lock(&register_mutex); + mutex_lock(&chip->shutdown_mutex); chip->shutdown = 1; chip->num_interfaces--; if (chip->num_interfaces <= 0) { @@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; + mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); snd_card_free_when_closed(card); } else { + mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); } } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 4132522ac90f..e3f680526cb5 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, } if (changed) { + mutex_lock(&subs->stream->chip->shutdown_mutex); /* format changed */ snd_usb_release_substream_urbs(subs, 0); /* influenced: period_bytes, channels, rate, format, */ @@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, params_rate(hw_params), snd_pcm_format_physical_width(params_format(hw_params)) * params_channels(hw_params)); + mutex_unlock(&subs->stream->chip->shutdown_mutex); } return ret; @@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; - if (!subs->stream->chip->shutdown) - snd_usb_release_substream_urbs(subs, 0); + mutex_lock(&subs->stream->chip->shutdown_mutex); + snd_usb_release_substream_urbs(subs, 0); + mutex_unlock(&subs->stream->chip->shutdown_mutex); return snd_pcm_lib_free_vmalloc_buffer(substream); } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index db3eb21627ee..6e66fffe87f5 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -36,6 +36,7 @@ struct snd_usb_audio { struct snd_card *card; u32 usb_id; int shutdown; + struct mutex shutdown_mutex; unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ int num_interfaces; int num_suspended_intf; diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 746cf03cb05d..0ace786e83e0 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) c->start_time = start; if (p->start_time == 0 || p->start_time > start) p->start_time = start; - - if (cpu > numcpus) - numcpus = cpu; } #define MAX_CPUS 4096 @@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used, if (!event_str) return 0; + if (sample->cpu > numcpus) + numcpus = sample->cpu; + if (strcmp(event_str, "power:cpu_idle") == 0) { struct power_processor_entry *ppe = (void *)te; if (ppe->state == (u32)PWR_EVENT_EXIT) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index f6a929e74981..0866bcdb5e8e 100644 --- a/tools/perf/util/header.c 
+++ b/tools/perf/util/header.c @@ -270,11 +270,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms) { const size_t size = PATH_MAX; - char *realname = realpath(name, NULL), - *filename = malloc(size), + char *realname, *filename = malloc(size), *linkname = malloc(size), *targetname; int len, err = -1; + if (is_kallsyms) + realname = (char *)name; + else + realname = realpath(name, NULL); + if (realname == NULL || filename == NULL || linkname == NULL) goto out_free; @@ -306,7 +310,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, if (symlink(targetname, linkname) == 0) err = 0; out_free: - free(realname); + if (!is_kallsyms) + free(realname); free(filename); free(linkname); return err; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 32f4f1f2f6e4..df51560f16f7 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, { struct sort_entry *se; u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; + u64 nr_events; const char *sep = symbol_conf.field_sep; int ret; @@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, if (pair_hists) { period = self->pair ? self->pair->period : 0; + nr_events = self->pair ? self->pair->nr_events : 0; total = pair_hists->stats.total_period; period_sys = self->pair ? self->pair->period_sys : 0; period_us = self->pair ? self->pair->period_us : 0; @@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, period_guest_us = self->pair ? self->pair->period_guest_us : 0; } else { period = self->period; + nr_events = self->nr_events; total = session_total; period_sys = self->period_sys; period_us = self->period_us; @@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, if (symbol_conf.show_nr_samples) { if (sep) - ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); + ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); else - ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); + ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); } if (pair_hists) { diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index fb737fe9be91..96c866045d60 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -456,9 +456,9 @@ void svg_legenda(void) return; svg_legenda_box(0, "Running", "sample"); - svg_legenda_box(100, "Idle","rect.c1"); - svg_legenda_box(200, "Deeper Idle", "rect.c3"); - svg_legenda_box(350, "Deepest Idle", "rect.c6"); + svg_legenda_box(100, "Idle","c1"); + svg_legenda_box(200, "Deeper Idle", "c3"); + svg_legenda_box(350, "Deepest Idle", "c6"); svg_legenda_box(550, "Sleeping", "process2"); svg_legenda_box(650, "Waiting for cpu", "waiting"); svg_legenda_box(800, "Blocked on IO", "blocked"); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 7821d0e6866f..b1bf490aff88 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1836,7 +1836,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map, int err = -1, fd; char symfs_vmlinux[PATH_MAX]; - snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s", + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", symbol_conf.symfs, vmlinux); fd = open(symfs_vmlinux, O_RDONLY); if (fd < 0)