1447 files changed, 17242 insertions, 10191 deletions
@@ -1510,6 +1510,14 @@ D: Natsemi ethernet D: Cobalt Networks (x86) support D: This-and-That +N: Mark M. Hoffman +E: mhoffman@lightlink.com +D: asb100, lm93 and smsc47b397 hardware monitoring drivers +D: hwmon subsystem core +D: hwmon subsystem maintainer +D: i2c-sis96x and i2c-stub SMBus drivers +S: USA + N: Dirk Hohndel E: hohndel@suse.de D: The XFree86[tm] Project diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index 7514dbf0a679..c36892c072da 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -227,7 +227,7 @@ X!Isound/sound_firmware.c <chapter id="uart16x50"> <title>16x50 UART Driver</title> !Edrivers/tty/serial/serial_core.c -!Edrivers/tty/serial/8250/8250.c +!Edrivers/tty/serial/8250/8250_core.c </chapter> <chapter id="fbdev"> diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt index 13b707b7355c..c3a14e0ad0ad 100644 --- a/Documentation/devicetree/bindings/mfd/ab8500.txt +++ b/Documentation/devicetree/bindings/mfd/ab8500.txt @@ -13,9 +13,6 @@ Required parent device properties: 4 = active high level-sensitive 8 = active low level-sensitive -Optional parent device properties: -- reg : contains the PRCMU mailbox address for the AB8500 i2c port - The AB8500 consists of a large and varied group of sub-devices: Device IRQ Names Supply Names Description @@ -86,9 +83,8 @@ Non-standard child device properties: - stericsson,amic2-bias-vamic1 : Analoge Mic wishes to use a non-standard Vamic - stericsson,earpeice-cmv : Earpeice voltage (only: 950 | 1100 | 1270 | 1580) -ab8500@5 { +ab8500 { compatible = "stericsson,ab8500"; - reg = <5>; /* mailbox 5 is i2c */ interrupts = <0 40 0x4>; interrupt-controller; #interrupt-cells = <2>; diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt index 1e1145ca4f3c..8f01cb190f25 100644 --- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt +++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt @@ -11,6 +11,9 @@ Required properties: - "nvidia,tegra20-uart" - "nxp,lpc3220-uart" - "ibm,qpace-nwp-serial" + - "altr,16550-FIFO32" + - "altr,16550-FIFO64" + - "altr,16550-FIFO128" - "serial" if the port type is unknown. - reg : offset and length of the register set for the device. - interrupts : should contain uart interrupt. 
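The of-serial binding hunk above only lists the new "altr,16550-FIFO32/64/128" compatible strings alongside the required reg and interrupts properties; as a rough sketch of how such a node could be wired up (the unit address, reg window, interrupt specifier, clock-frequency and the "ns16550a" fallback below are illustrative placeholders, not values taken from this patch):

	serial@ffc02000 {
		compatible = "altr,16550-FIFO32", "ns16550a";	/* fallback compatible is an assumption */
		reg = <0xffc02000 0x1000>;			/* placeholder offset and length */
		interrupts = <0 162 4>;				/* placeholder interrupt specifier */
		reg-shift = <2>;
		reg-io-width = <4>;
		clock-frequency = <100000000>;			/* placeholder UART clock */
	};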
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75 index c91a1d15fa28..69af1c7db6b7 100644 --- a/Documentation/hwmon/lm75 +++ b/Documentation/hwmon/lm75 @@ -23,7 +23,7 @@ Supported chips: Datasheet: Publicly available at the Maxim website http://www.maxim-ic.com/ * Microchip (TelCom) TCN75 - Prefix: 'lm75' + Prefix: 'tcn75' Addresses scanned: none Datasheet: Publicly available at the Microchip website http://www.microchip.com/ diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c index 30fe4bb9a069..0d6018c316c7 100644 --- a/Documentation/i2c/busses/i2c-diolan-u2c +++ b/Documentation/i2c/busses/i2c-diolan-u2c @@ -5,7 +5,7 @@ Supported adapters: Documentation: http://www.diolan.com/i2c/u2c12.html -Author: Guenter Roeck <guenter.roeck@ericsson.com> +Author: Guenter Roeck <linux@roeck-us.net> Description ----------- diff --git a/Documentation/ia64/err_inject.txt b/Documentation/ia64/err_inject.txt index 223e4f0582d0..9f651c181429 100644 --- a/Documentation/ia64/err_inject.txt +++ b/Documentation/ia64/err_inject.txt @@ -882,7 +882,7 @@ int err_inj() cpu=parameters[i].cpu; k = cpu%64; j = cpu/64; - mask[j]=1<<k; + mask[j] = 1UL << k; if (sched_setaffinity(0, MASK_SIZE*8, mask)==-1) { perror("Error sched_setaffinity:"); diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt index 3262b6e4d686..e544c7ff8cfa 100644 --- a/Documentation/input/alps.txt +++ b/Documentation/input/alps.txt @@ -3,10 +3,26 @@ ALPS Touchpad Protocol Introduction ------------ - -Currently the ALPS touchpad driver supports four protocol versions in use by -ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various -protocol versions is contained in the following sections. +Currently the ALPS touchpad driver supports five protocol versions in use by +ALPS touchpads, called versions 1, 2, 3, 4 and 5. + +Since roughly mid-2010 several new ALPS touchpads have been released and +integrated into a variety of laptops and netbooks. These new touchpads +have enough behavior differences that the alps_model_data definition +table, describing the properties of the different versions, is no longer +adequate. The design choices were to re-define the alps_model_data +table, with the risk of regression testing existing devices, or isolate +the new devices outside of the alps_model_data table. The latter design +choice was made. The new touchpad signatures are named: "Rushmore", +"Pinnacle", and "Dolphin", which you will see in the alps.c code. +For the purposes of this document, this group of ALPS touchpads will +generically be called "new ALPS touchpads". + +We experimented with probing the ACPI interface _HID (Hardware ID)/_CID +(Compatibility ID) definition as a way to uniquely identify the +different ALPS variants but there did not appear to be a 1:1 mapping. +In fact, it appeared to be an m:n mapping between the _HID and actual +hardware type. Detection --------- @@ -20,9 +36,13 @@ If the E6 report is successful, the touchpad model is identified using the "E7 report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is matched against known models in the alps_model_data_array. -With protocol versions 3 and 4, the E7 report model signature is always -73-02-64. To differentiate between these versions, the response from the -"Enter Command Mode" sequence must be inspected as described below. +For older touchpads supporting protocol versions 3 and 4, the E7 report +model signature is always 73-02-64. 
To differentiate between these +versions, the response from the "Enter Command Mode" sequence must be +inspected as described below. + +The new ALPS touchpads have an E7 signature of 73-03-50 or 73-03-0A but +seem to be better differentiated by the EC Command Mode response. Command Mode ------------ @@ -47,6 +67,14 @@ address of the register being read, and the third contains the value of the register. Registers are written by writing the value one nibble at a time using the same encoding used for addresses. +For the new ALPS touchpads, the EC command is used to enter command +mode. The response in the new ALPS touchpads is significantly different, +and is more important in determining the behavior. This code has been +separated from the original alps_model_data table and put in the +alps_identify function. For example, there seem to be two hardware init +sequences for the "Dolphin" touchpads as determined by the second byte +of the EC response. + Packet Format ------------- @@ -187,3 +215,28 @@ There are several things worth noting here. well. So far no v4 devices with tracksticks have been encountered. + +ALPS Absolute Mode - Protocol Version 5 +--------------------------------------- +This is basically Protocol Version 3 but with different logic for packet +decode. It uses the same alps_process_touchpad_packet_v3 call with a +specialized decode_fields function pointer to correctly interpret the +packets. This appears to only be used by the Dolphin devices. + +For single-touch, the 6-byte packet format is: + + byte 0: 1 1 0 0 1 0 0 0 + byte 1: 0 x6 x5 x4 x3 x2 x1 x0 + byte 2: 0 y6 y5 y4 y3 y2 y1 y0 + byte 3: 0 M R L 1 m r l + byte 4: y10 y9 y8 y7 x10 x9 x8 x7 + byte 5: 0 z6 z5 z4 z3 z2 z1 z0 + +For mt, the format is: + + byte 0: 1 1 1 n3 1 n2 n1 x24 + byte 1: 1 y7 y6 y5 y4 y3 y2 y1 + byte 2: ? x2 x1 y12 y11 y10 y9 y8 + byte 3: 0 x23 x22 x21 x20 x19 x18 x17 + byte 4: 0 x9 x8 x7 x6 x5 x4 x3 + byte 5: 0 x16 x15 x14 x13 x12 x11 x10 diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index 13f1aa09b938..9c7fd988e299 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt @@ -297,6 +297,7 @@ Boot into System Kernel On ia64, 256M@256M is a generous value that typically works. The region may be automatically placed on ia64, see the dump-capture kernel config option notes above. + If sparse memory is used, the size should be rounded to GRANULE boundaries. On s390x, typically use "crashkernel=xxM". The value of xx is dependent on the memory consumption of the kdump system. In general this is not diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4609e81dbc37..8ccbf27aead4 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. is selected automatically. Check Documentation/kdump/kdump.txt for further details. - crashkernel_low=size[KMG] - [KNL, x86] parts under 4G. - crashkernel=range1:size1[,range2:size2,...][@offset] [KNL] Same as above, but depends on the memory in the running system. The syntax of range is @@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted. a memory unit (amount[KMG]). See also Documentation/kdump/kdump.txt for an example. + crashkernel=size[KMG],high + [KNL, x86_64] range could be above 4G. Allows the kernel + to allocate the physical memory region from the top, so it + could be above 4G if the system has more than 4G of RAM installed.
+ Otherwise the memory region will be allocated below 4G, if + available. + It will be ignored if crashkernel=X is specified. + crashkernel=size[KMG],low + [KNL, x86_64] range under 4G. When crashkernel=X,high + is passed, the kernel could allocate the physical memory + region above 4G, which causes the second kernel to crash + on systems that require some amount of low memory, e.g. + swiotlb requires at least 64M+32K of low memory. The kernel + would try to allocate 72M below 4G automatically. + This option lets the user specify their own low range under + 4G for the second kernel instead. + 0: disables the low allocation. + It will be ignored when crashkernel=X,high is not used + or the memory reserved is below 4G. + cs89x0_dma= [HW,NET] Format: <dma> @@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. edd= [EDD] Format: {"off" | "on" | "skip[mbr]"} + efi_no_storage_paranoia [EFI; X86] + Using this parameter you can use more than 50% of + your efi variable storage. Use this parameter only if + you are really sure that your UEFI does sane garbage + collection and fulfills the spec, otherwise your board + may brick. + eisa_irq_edge= [PARISC,HW] See header of drivers/parisc/eisa.c. diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt index f2a2488f1bf3..9573d0c48c6e 100644 --- a/Documentation/networking/ipvs-sysctl.txt +++ b/Documentation/networking/ipvs-sysctl.txt @@ -15,6 +15,13 @@ amemthresh - INTEGER enabled and the variable is automatically set to 2, otherwise the strategy is disabled and the variable is set to 1. +backup_only - BOOLEAN + 0 - disabled (default) + not 0 - enabled + + If set, disable the director function while the server is + in backup mode to avoid packet loops for DR/TUN methods. + conntrack - BOOLEAN 0 - disabled (default) not 0 - enabled diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt index c0aab985bad9..949d5dcdd9a3 100644 --- a/Documentation/networking/tuntap.txt +++ b/Documentation/networking/tuntap.txt @@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com> Proto [2 bytes] Raw protocol(IP, IPv6, etc) frame. + 3.3 Multiqueue tuntap interface: + + From version 3.8, Linux supports multiqueue tuntap, which can use multiple + file descriptors (queues) to parallelize packet sending and receiving. The + device allocation is the same as before, and if the user wants to create + multiple queues, TUNSETIFF with the same device name must be called multiple + times with the IFF_MULTI_QUEUE flag. + + char *dev should be the name of the device, queues is the number of queues to + be created, fds is used to store and return the file descriptors (queues) + created to the caller. Each file descriptor serves as the interface of a + queue which can be accessed by userspace.
+ + #include <linux/if.h> + #include <linux/if_tun.h> + + int tun_alloc_mq(char *dev, int queues, int *fds) + { + struct ifreq ifr; + int fd, err, i; + + if (!dev) + return -1; + + memset(&ifr, 0, sizeof(ifr)); + /* Flags: IFF_TUN - TUN device (no Ethernet headers) + * IFF_TAP - TAP device + * + * IFF_NO_PI - Do not provide packet information + * IFF_MULTI_QUEUE - Create a queue of multiqueue device + */ + ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE; + strcpy(ifr.ifr_name, dev); + + for (i = 0; i < queues; i++) { + if ((fd = open("/dev/net/tun", O_RDWR)) < 0) { + err = fd; + goto err; + } + err = ioctl(fd, TUNSETIFF, (void *)&ifr); + if (err) { + close(fd); + goto err; + } + fds[i] = fd; + } + + return 0; + err: + for (--i; i >= 0; i--) + close(fds[i]); + return err; + } + + A new ioctl(TUNSETQUEUE) was introduced to enable or disable a queue. When + calling it with the IFF_DETACH_QUEUE flag, the queue is disabled. When + calling it with the IFF_ATTACH_QUEUE flag, the queue is enabled. The queue is + enabled by default after it is created through TUNSETIFF. + + fd is the file descriptor (queue) that we want to enable or disable; when + enable is true we enable it, otherwise we disable it. + + #include <linux/if.h> + #include <linux/if_tun.h> + + int tun_set_queue(int fd, int enable) + { + struct ifreq ifr; + + memset(&ifr, 0, sizeof(ifr)); + + if (enable) + ifr.ifr_flags = IFF_ATTACH_QUEUE; + else + ifr.ifr_flags = IFF_DETACH_QUEUE; + + return ioctl(fd, TUNSETQUEUE, (void *)&ifr); + } + Universal TUN/TAP device driver Frequently Asked Question. 1. What platforms are supported by TUN/TAP driver ? diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt index ae66f9b90a25..fcaf0b4efba2 100644 --- a/Documentation/s390/s390dbf.txt +++ b/Documentation/s390/s390dbf.txt @@ -143,7 +143,8 @@ Parameter: id: handle for debug log Return Value: none -Description: frees memory for a debug log +Description: frees memory for a debug log and removes all registered debug + views. Must not be called within an interrupt handler --------------------------------------------------------------------------- diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx index 27a91cf43d6d..5020b7b5a244 100644 --- a/Documentation/scsi/LICENSE.qla2xxx +++ b/Documentation/scsi/LICENSE.qla2xxx @@ -1,4 +1,4 @@ -Copyright (c) 2003-2012 QLogic Corporation +Copyright (c) 2003-2013 QLogic Corporation QLogic Linux FC-FCoE Driver This program includes a device driver for Linux 3.x. diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index ce6581c8ca26..95731a08f257 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt @@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. enable_msi - Enable Message Signaled Interrupt (MSI) (default = off) power_save - Automatic power-saving timeout (in second, 0 = disable) - power_save_controller - Support runtime D3 of HD-audio controller - (-1 = on for supported chip (default), false = off, - true = force to on even for unsupported hardware) + power_save_controller - Reset HD-audio controller in power-saving mode + (default = on) align_buffer_size - Force rounding of buffer/period sizes to multiples of 128 bytes. This is more efficient in terms of memory access but isn't required by the HDA spec and prevents @@ -912,7 +911,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix.
This was removed. models depending on the codec chip. The list of available models is found in HD-Audio-Models.txt - The model name "genric" is treated as a special case. When this + The model name "generic" is treated as a special case. When this model is given, the driver uses the generic codec parser without "codec-patch". It's sometimes good for testing and debugging. diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html index d9776cf60c07..9663b45f6fde 100644 --- a/Documentation/sound/alsa/seq_oss.html +++ b/Documentation/sound/alsa/seq_oss.html @@ -285,7 +285,7 @@ sample data. <H4> 7.2.4 Close Callback</H4> The <TT>close</TT> callback is called when this device is closed by the -applicaion. If any private data was allocated in open callback, it must +application. If any private data was allocated in open callback, it must be released in the close callback. The deletion of ALSA port should be done here, too. This callback must not be NULL. <H4> diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index 53d6a3c51d87..a372304aef10 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -1873,7 +1873,7 @@ feature: status\input | 0 | 1 | else | --------------+------------+------------+------------+ - not allocated |(do nothing)| alloc+swap | EINVAL | + not allocated |(do nothing)| alloc+swap |(do nothing)| --------------+------------+------------+------------+ allocated | free | swap | clear | --------------+------------+------------+------------+ diff --git a/MAINTAINERS b/MAINTAINERS index 95616582c728..8bdd7a7ef2f4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1338,12 +1338,6 @@ S: Maintained F: drivers/platform/x86/asus*.c F: drivers/platform/x86/eeepc*.c -ASUS ASB100 HARDWARE MONITOR DRIVER -M: "Mark M. Hoffman" <mhoffman@lightlink.com> -L: lm-sensors@lm-sensors.org -S: Maintained -F: drivers/hwmon/asb100.c - ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API M: Dan Williams <djbw@fb.com> W: http://sourceforge.net/projects/xscaleiop @@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h +ATMEL I2C DRIVER +M: Ludovic Desroches <ludovic.desroches@atmel.com> +L: linux-i2c@vger.kernel.org +S: Supported +F: drivers/i2c/busses/i2c-at91.c + ATMEL ISI DRIVER M: Josh Wu <josh.wu@atmel.com> L: linux-media@vger.kernel.org @@ -2629,7 +2629,7 @@ F: include/uapi/drm/ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) M: Daniel Vetter <daniel.vetter@ffwll.ch> -L: intel-gfx@lists.freedesktop.org (subscribers-only) +L: intel-gfx@lists.freedesktop.org L: dri-devel@lists.freedesktop.org T: git git://people.freedesktop.org/~danvet/drm-intel S: Supported @@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/ F: drivers/base/firmware*.c F: include/linux/firmware.h +FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card) +M: Joshua Morris <josh.h.morris@us.ibm.com> +M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> +S: Maintained +F: drivers/block/rsxx/ + FLOPPY DRIVER M: Jiri Kosina <jkosina@suse.cz> T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git @@ -3851,7 +3857,7 @@ F: drivers/i2c/busses/i2c-ismt.c F: Documentation/i2c/busses/i2c-ismt I2C/SMBUS STUB DRIVER -M: "Mark M. 
Hoffman" <mhoffman@lightlink.com> +M: Jean Delvare <khali@linux-fr.org> L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/i2c-stub.c @@ -4005,6 +4011,22 @@ M: Stanislaw Gruszka <stf_xl@wp.pl> S: Maintained F: drivers/usb/atm/ueagle-atm.c +INA209 HARDWARE MONITOR DRIVER +M: Guenter Roeck <linux@roeck-us.net> +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/ina209 +F: Documentation/devicetree/bindings/i2c/ina209.txt +F: drivers/hwmon/ina209.c + +INA2XX HARDWARE MONITOR DRIVER +M: Guenter Roeck <linux@roeck-us.net> +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/ina2xx +F: drivers/hwmon/ina2xx.c +F: include/linux/platform_data/ina2xx.h + INDUSTRY PACK SUBSYSTEM (IPACK) M: Samuel Iglesias Gonsalvez <siglesias@igalia.com> M: Jens Taprogge <jens.taprogge@taprogge.org> @@ -4919,6 +4941,12 @@ W: logfs.org S: Maintained F: fs/logfs/ +LPC32XX MACHINE SUPPORT +M: Roland Stigge <stigge@antcom.de> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-lpc32xx/ + LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> @@ -5043,9 +5071,8 @@ S: Maintained F: drivers/net/ethernet/marvell/sk* MARVELL LIBERTAS WIRELESS DRIVER -M: Dan Williams <dcbw@redhat.com> L: libertas-dev@lists.infradead.org -S: Maintained +S: Orphan F: drivers/net/wireless/libertas/ MARVELL MV643XX ETHERNET DRIVER @@ -5098,6 +5125,15 @@ S: Maintained F: Documentation/hwmon/max6650 F: drivers/hwmon/max6650.c +MAX6697 HARDWARE MONITOR DRIVER +M: Guenter Roeck <linux@roeck-us.net> +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/max6697 +F: Documentation/devicetree/bindings/i2c/max6697.txt +F: drivers/hwmon/max6697.c +F: include/linux/platform_data/max6697.h + MAXIRADIO FM RADIO RECEIVER DRIVER M: Hans Verkuil <hverkuil@xs4all.nl> L: linux-media@vger.kernel.org @@ -5538,6 +5574,7 @@ F: include/uapi/linux/if_* F: include/uapi/linux/netdevice.h NETXEN (1/10) GbE SUPPORT +M: Manish Chopra <manish.chopra@qlogic.com> M: Sony Chacko <sony.chacko@qlogic.com> M: Rajesh Borundia <rajesh.borundia@qlogic.com> L: netdev@vger.kernel.org @@ -5622,6 +5659,14 @@ S: Maintained F: drivers/video/riva/ F: drivers/video/nvidia/ +NVM EXPRESS DRIVER +M: Matthew Wilcox <willy@linux.intel.com> +L: linux-nvme@lists.infradead.org +T: git git://git.infradead.org/users/willy/linux-nvme.git +S: Supported +F: drivers/block/nvme.c +F: include/linux/nvme.h + OMAP SUPPORT M: Tony Lindgren <tony@atomide.com> L: linux-omap@vger.kernel.org @@ -5650,7 +5695,7 @@ S: Maintained F: arch/arm/*omap*/*clock* OMAP POWER MANAGEMENT SUPPORT -M: Kevin Hilman <khilman@ti.com> +M: Kevin Hilman <khilman@deeprootsystems.com> L: linux-omap@vger.kernel.org S: Maintained F: arch/arm/*omap*/*pm* @@ -5744,7 +5789,7 @@ F: arch/arm/*omap*/usb* OMAP GPIO DRIVER M: Santosh Shilimkar <santosh.shilimkar@ti.com> -M: Kevin Hilman <khilman@ti.com> +M: Kevin Hilman <khilman@deeprootsystems.com> L: linux-omap@vger.kernel.org S: Maintained F: drivers/gpio/gpio-omap.c @@ -6176,7 +6221,7 @@ F: include/linux/power_supply.h F: drivers/power/ PNP SUPPORT -M: Adam Belay <abelay@mit.edu> +M: Rafael J. 
Wysocki <rafael.j.wysocki@intel.com> M: Bjorn Helgaas <bhelgaas@google.com> S: Maintained F: drivers/pnp/ @@ -6412,6 +6457,8 @@ F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER +M: Rajesh Borundia <rajesh.borundia@qlogic.com> +M: Shahed Shaikh <shahed.shaikh@qlogic.com> M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com> M: Sony Chacko <sony.chacko@qlogic.com> M: linux-driver@qlogic.com @@ -6516,12 +6563,6 @@ S: Maintained F: Documentation/blockdev/ramdisk.txt F: drivers/block/brd.c -RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card) -M: Joshua Morris <josh.h.morris@us.ibm.com> -M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> -S: Maintained -F: drivers/block/rsxx/ - RANDOM NUMBER DRIVER M: Theodore Ts'o" <tytso@mit.edu> S: Maintained @@ -6590,7 +6631,7 @@ S: Supported F: fs/reiserfs/ REGISTER MAP ABSTRACTION -M: Mark Brown <broonie@opensource.wolfsonmicro.com> +M: Mark Brown <broonie@kernel.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git S: Supported F: drivers/base/regmap/ @@ -6916,7 +6957,6 @@ F: drivers/scsi/st* SCTP PROTOCOL M: Vlad Yasevich <vyasevich@gmail.com> -M: Sridhar Samudrala <sri@us.ibm.com> M: Neil Horman <nhorman@tuxdriver.com> L: linux-sctp@vger.kernel.org W: http://lksctp.sourceforge.net @@ -7138,7 +7178,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c TI DAVINCI MACHINE SUPPORT M: Sekhar Nori <nsekhar@ti.com> -M: Kevin Hilman <khilman@ti.com> +M: Kevin Hilman <khilman@deeprootsystems.com> L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) T: git git://gitorious.org/linux-davinci/linux-davinci.git Q: http://patchwork.kernel.org/project/linux-davinci/list/ @@ -7171,13 +7211,6 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/sis/sis900.* -SIS 96X I2C/SMBUS DRIVER -M: "Mark M. Hoffman" <mhoffman@lightlink.com> -L: linux-i2c@vger.kernel.org -S: Maintained -F: Documentation/i2c/busses/i2c-sis96x -F: drivers/i2c/busses/i2c-sis96x.c - SIS FRAMEBUFFER DRIVER M: Thomas Winischhofer <thomas@winischhofer.net> W: http://www.winischhofer.net/linuxsisvga.shtml @@ -7255,7 +7288,7 @@ F: Documentation/hwmon/sch5627 F: drivers/hwmon/sch5627.c SMSC47B397 HARDWARE MONITOR DRIVER -M: "Mark M. 
Hoffman" <mhoffman@lightlink.com> +M: Jean Delvare <khali@linux-fr.org> L: lm-sensors@lm-sensors.org S: Maintained F: Documentation/hwmon/smsc47b397 @@ -7346,7 +7379,7 @@ F: sound/ SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) M: Liam Girdwood <lgirdwood@gmail.com> -M: Mark Brown <broonie@opensource.wolfsonmicro.com> +M: Mark Brown <broonie@kernel.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC @@ -7435,7 +7468,7 @@ F: drivers/clk/spear/ SPI SUBSYSTEM M: Grant Likely <grant.likely@secretlab.ca> -M: Mark Brown <broonie@opensource.wolfsonmicro.com> +M: Mark Brown <broonie@kernel.org> L: spi-devel-general@lists.sourceforge.net Q: http://patchwork.kernel.org/project/spi-devel-general/list/ T: git git://git.secretlab.ca/git/linux-2.6.git @@ -7678,9 +7711,10 @@ F: include/linux/swiotlb.h SYNOPSYS ARC ARCHITECTURE M: Vineet Gupta <vgupta@synopsys.com> -L: linux-snps-arc@vger.kernel.org S: Supported F: arch/arc/ +F: Documentation/devicetree/bindings/arc/ +F: drivers/tty/serial/arc-uart.c SYSV FILESYSTEM M: Christoph Hellwig <hch@infradead.org> @@ -8679,7 +8713,7 @@ F: drivers/scsi/vmw_pvscsi.h VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood <lrg@ti.com> -M: Mark Brown <broonie@opensource.wolfsonmicro.com> +M: Mark Brown <broonie@kernel.org> W: http://opensource.wolfsonmicro.com/node/15 W: http://www.slimlogic.co.uk/?p=48 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 9 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = NAME = Unicycling Gorilla # *DOCUMENTATION* @@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),) # Carefully list dependencies so we do not try to build scripts twice # in parallel PHONY += scripts -scripts: scripts_basic include/config/auto.conf include/config/tristate.conf +scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \ + asm-generic $(Q)$(MAKE) $(build)=$(@) # Objects we will link into vmlinux / subdirs we need to visit diff --git a/arch/Kconfig b/arch/Kconfig index 5a1779c93940..1455579791ec 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -319,13 +319,6 @@ config ARCH_WANT_OLD_COMPAT_IPC select ARCH_WANT_COMPAT_IPC_PARSE_VERSION bool -config HAVE_VIRT_TO_BUS - bool - help - An architecture should select this if it implements the - deprecated interface virt_to_bus(). All new architectures - should probably not select this. 
- config HAVE_ARCH_SECCOMP_FILTER bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5833aa441481..8a33ba01301f 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -9,7 +9,7 @@ config ALPHA select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select AUTO_IRQ_AFFINITY if SMP select GENERIC_IRQ_SHOW diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa1..2cc3cc519c54 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile @@ -12,7 +12,7 @@ NM := $(NM) -B LDFLAGS_vmlinux := -static -N #-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data +cflags-y := -pipe -mno-fp-regs -ffixed-8 cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e73..bae97eb19d26 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) + 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd2215..7b2be251c30f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -117,13 +117,6 @@ handle_irq(int irq) return; } - /* - * From here we must proceed with IPL_MAX. Note that we do not - * explicitly enable interrupts afterwards - some MILO PALcode - * (namely LX164 one) seems to have severe problems with RTI - * at IPL 0. - */ - local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a8..f433fc11877a 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; + + /* + * Disable interrupts during IRQ handling. + * Note that there is no matching local_irq_enable() due to + * severe problems with RTI at IPL0 and some MILO PALcode + * (namely LX164). 
+ */ + local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; - local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { @@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708d..1383f8601a93 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -209,6 +213,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c5..a53cf03f49d5 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. */ - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. 
*/ - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, + titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 31f77aec0823..45b8e0cea176 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) - sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, + s->dma_address = dma_map_page(dev, sg_page(s), s->offset, s->length, dir); return nents; diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index f4c8d36ebecb..a26282857683 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *); */ #define ELF_PLATFORM (NULL) -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) - #endif diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 23daa326fc9b..eb2ae53187d9 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -415,7 +415,7 @@ *-------------------------------------------------------------*/ .macro SAVE_ALL_EXCEPTION marker - st \marker, [sp, 8] + st \marker, [sp, 8] /* orig_r8 */ st r0, [sp, 4] /* orig_r0, needed only for sys calls */ /* Restore r9 used to code the early prologue */ diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd84806b62f..eac071668201 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) - : "cc"); + : "memory", "cc"); return flags; } @@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " flag %0 \n" : - : "r"(flags)); + : "r"(flags) + : "memory"); } /* @@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void) " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) - : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); + : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) + : "memory"); } /* @@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void) __asm__ __volatile__( " lr %0, [status32] \n" - : "=&r"(temp)); + : "=&r"(temp) + : + : "memory"); return temp; } diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index f3c4934f0ca9..4930957ca3d3 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h @@ -13,7 +13,7 @@ #ifdef CONFIG_KGDB -#include <asm/user.h> +#include <asm/ptrace.h> /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set * register API yet */ @@ -53,9 +53,7 @@ enum arc700_linux_regnums { }; #else -static inline void kgdb_trap(struct pt_regs *regs, int param) -{ -} +#define kgdb_trap(regs, param) #endif #endif /* __ARC_KGDB_H__ */ diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 8ae783d20a81..6179de7e07c2 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs) #define orig_r8_IS_SCALL 0x0001 #define orig_r8_IS_SCALL_RESTARTED 0x0002 #define orig_r8_IS_BRKPT 0x0004 -#define orig_r8_IS_EXCPN 0x0004 +#define orig_r8_IS_EXCPN 0x0008 #define 
orig_r8_IS_IRQ1 0x0010 #define orig_r8_IS_IRQ2 0x0020 diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h index e53a5340ba4f..dd785befe7fd 100644 --- a/arch/arc/include/asm/syscalls.h +++ b/arch/arc/include/asm/syscalls.h @@ -16,8 +16,6 @@ #include <linux/types.h> int sys_clone_wrapper(int, int, int, int, int); -int sys_fork_wrapper(void); -int sys_vfork_wrapper(void); int sys_cacheflush(uint32_t, uint32_t uint32_t); int sys_arc_settls(void *); int sys_arc_gettls(void); diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 6afa4f702075..30333cec0fef 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -28,14 +28,14 @@ */ struct user_regs_struct { - struct scratch { + struct { long pad; long bta, lp_start, lp_end, lp_count; long status32, ret, blink, fp, gp; long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; long sp; } scratch; - struct callee { + struct { long pad; long r25, r24, r23, r22, r21, r20; long r19, r18, r17, r16, r15, r14, r13; diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index ef6800ba2f03..91eeab81f52d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -452,7 +452,7 @@ tracesys: ; using ERET won't work since next-PC has already committed lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 - st r12, [r11, THREAD_FAULT_ADDR] + st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address ; PRE Sys Call Ptrace hook mov r0, sp ; pt_regs needed @@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork ;################### Special Sys Call Wrappers ########################## -; TBD: call do_fork directly from here -ARC_ENTRY sys_fork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_fork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_fork_wrapper - -ARC_ENTRY sys_vfork_wrapper - SAVE_CALLEE_SAVED_USER - bl @sys_vfork - DISCARD_CALLEE_SAVED_USER - - GET_CURR_THR_INFO_FLAGS r10 - btst r10, TIF_SYSCALL_TRACE - bnz tracesys_exit - - b ret_from_system_call -ARC_EXIT sys_vfork_wrapper - ARC_ENTRY sys_clone_wrapper SAVE_CALLEE_SAVED_USER bl @sys_clone diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index 2888ba5be47e..52bdc83c1495 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -9,6 +9,7 @@ */ #include <linux/kgdb.h> +#include <linux/sched.h> #include <asm/disasm.h> #include <asm/cacheflush.h> diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index dc0f968dae0a..2d95ac07df7b 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "\n"); -#ifdef _ASM_GENERIC_UNISTD_H n += scnprintf(buf + n, len - n, - "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n"); -#endif + "OS ABI [v3]\t: no-legacy-syscalls\n"); return buf; } diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c index f6bdd07583f3..9d6c1ca26af6 100644 --- a/arch/arc/kernel/sys.c +++ b/arch/arc/kernel/sys.c @@ -6,8 +6,6 @@ #include <asm/syscalls.h> #define sys_clone sys_clone_wrapper -#define sys_fork sys_fork_wrapper -#define sys_vfork sys_vfork_wrapper #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5b714695b01b..1cacda426a0e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -49,7 +49,6 @@ config ARM select HAVE_REGS_AND_STACK_ACCESS_API select 
HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 - select HAVE_VIRT_TO_BUS select KTIME_SCALAR select PERF_USE_VMALLOC select RTC_LIB @@ -556,7 +555,6 @@ config ARCH_IXP4XX config ARCH_DOVE bool "Marvell Dove" select ARCH_REQUIRE_GPIOLIB - select COMMON_CLK_DOVE select CPU_V7 select GENERIC_CLOCKEVENTS select MIGHT_HAVE_PCI @@ -744,6 +742,7 @@ config ARCH_RPC select NEED_MACH_IO_H select NEED_MACH_MEMORY_H select NO_IOPORT + select VIRT_TO_BUS help On the Acorn Risc-PC, Linux can support the internal IDE disk and CD-ROM interface, serial and parallel port, and the floppy drive. @@ -879,6 +878,7 @@ config ARCH_SHARK select ISA_DMA select NEED_MACH_MEMORY_H select PCI + select VIRT_TO_BUS select ZONE_DMA help Support for the StrongARM based Digital DNARD machine, also known @@ -1006,12 +1006,12 @@ config ARCH_MULTI_V4_V5 bool config ARCH_MULTI_V6 - bool "ARMv6 based platforms (ARM11, Scorpion, ...)" + bool "ARMv6 based platforms (ARM11)" select ARCH_MULTI_V6_V7 select CPU_V6 config ARCH_MULTI_V7 - bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" + bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" default y select ARCH_MULTI_V6_V7 select ARCH_VEXPRESS @@ -1183,9 +1183,9 @@ config ARM_NR_BANKS default 8 config IWMMXT - bool "Enable iWMMXt support" + bool "Enable iWMMXt support" if !CPU_PJ4 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 - default y if PXA27x || PXA3xx || ARCH_MMP + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 help Enable support for iWMMXt context switching at run time if running on a CPU that supports it. @@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420 to deadlock. This workaround puts DSB before executing ISB if an abort may occur on cache maintenance. +config ARM_ERRATA_798181 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" + depends on CPU_V7 && SMP + help + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not + adequately shooting down all use of the old entries. This + option enables the Linux kernel workaround for this erratum + which sends an IPI to the CPUs that are running the same ASID + as the one being invalidated. + endmenu source "arch/arm/common/Kconfig" @@ -1462,10 +1472,6 @@ config ISA_DMA bool select ISA_DMA_API -config ARCH_NO_VIRT_TO_BUS - def_bool y - depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK - # Select ISA DMA interface config ISA_DMA_API bool @@ -1657,13 +1663,16 @@ config LOCAL_TIMERS accounting to be spread across the timer interval, preventing a "thundering herd" at every timer tick. +# The GPIO number here must be sorted by descending number. In case of +# a multiplatform kernel, we just want the highest value required by the +# selected platforms. config ARCH_NR_GPIO int default 1024 if ARCH_SHMOBILE || ARCH_TEGRA - default 355 if ARCH_U8500 - default 264 if MACH_H4700 default 512 if SOC_OMAP5 + default 355 if ARCH_U8500 default 288 if ARCH_VT8500 || ARCH_SUNXI + default 264 if MACH_H4700 default 0 help Maximum number of GPIOs in the system. @@ -1887,8 +1896,9 @@ config XEN_DOM0 config XEN bool "Xen guest support on ARM (EXPERIMENTAL)" - depends on ARM && OF + depends on ARM && AEABI && OF depends on CPU_V7 && !CPU_V6 + depends on !GENERIC_ATOMIC64 help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. 
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index acddddac7ee4..9b31f4311ea2 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -492,9 +492,10 @@ config DEBUG_IMX_UART_PORT DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX51_UART || \ - DEBUG_IMX50_IMX53_UART || \ + DEBUG_IMX53_UART || \ DEBUG_IMX6Q_UART default 1 + depends on ARCH_MXC help Choose UART port on which kernel low-level debug messages should be output. diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 71768b8a1ab9..84aa2caf07ed 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -115,4 +115,4 @@ i: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" -subdir- := bootp compressed +subdir- := bootp compressed dts diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57dd9f30..3234875824dc 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts @@ -54,7 +54,7 @@ }; mvsdio@d00d4000 { - pinctrl-0 = <&sdio_pins2>; + pinctrl-0 = <&sdio_pins3>; pinctrl-names = "default"; status = "okay"; /* diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts index f8e4855bc9a5..070bba4f2585 100644 --- a/arch/arm/boot/dts/armada-370-rd.dts +++ b/arch/arm/boot/dts/armada-370-rd.dts @@ -64,5 +64,13 @@ status = "okay"; /* No CD or WP GPIOs */ }; + + usb@d0050000 { + status = "okay"; + }; + + usb@d0051000 { + status = "okay"; + }; }; }; diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi index 6f1acc75e155..5b708208b607 100644 --- a/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/arch/arm/boot/dts/armada-370-xp.dtsi @@ -31,7 +31,6 @@ mpic: interrupt-controller@d0020000 { compatible = "marvell,mpic"; #interrupt-cells = <1>; - #address-cells = <1>; #size-cells = <1>; interrupt-controller; }; @@ -54,7 +53,7 @@ reg = <0xd0012000 0x100>; reg-shift = <2>; interrupts = <41>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; serial@d0012100 { @@ -62,7 +61,7 @@ reg = <0xd0012100 0x100>; reg-shift = <2>; interrupts = <42>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d138020e..a195debb67d3 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -59,6 +59,12 @@ "mpp50", "mpp51", "mpp52"; marvell,function = "sd0"; }; + + sdio_pins3: sdio-pins3 { + marvell,pins = "mpp48", "mpp49", "mpp50", + "mpp51", "mpp52", "mpp53"; + marvell,function = "sd0"; + }; }; gpio0: gpio@d0018100 { diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index 1443949c165e..ca00d8326c87 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi @@ -46,7 +46,7 @@ reg = <0xd0012200 0x100>; reg-shift = <2>; interrupts = <43>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; serial@d0012300 { @@ -54,7 +54,7 @@ reg = <0xd0012300 0x100>; reg-shift = <2>; interrupts = <44>; - reg-io-width = <4>; + reg-io-width = <1>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index aa98e641931f..a98c0d50fbbe 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -238,8 +238,32 @@ nand { pinctrl_nand: nand-0 { atmel,pins = - <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ - 3 5 0x0 0x1>; /* PD4 gpio enable 
pin pull_up */ + <3 0 0x1 0x0 /* PD0 periph A Read Enable */ + 3 1 0x1 0x0 /* PD1 periph A Write Enable */ + 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */ + 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */ + 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */ + 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */ + 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */ + 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */ + 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */ + 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */ + 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */ + 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */ + 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */ + 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */ + }; + + pinctrl_nand_16bits: nand_16bits-0 { + atmel,pins = + <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */ + 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */ + 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */ + 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */ + 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */ + 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */ + 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */ + 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */ }; }; diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi index 4bf2a8774aa7..7e0481e2441a 100644 --- a/arch/arm/boot/dts/bcm2835.dtsi +++ b/arch/arm/boot/dts/bcm2835.dtsi @@ -105,7 +105,7 @@ compatible = "fixed-clock"; reg = <1>; #clock-cells = <0>; - clock-frequency = <150000000>; + clock-frequency = <250000000>; }; }; }; diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 69140ba99f46..aaa63d0a8096 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -191,8 +191,8 @@ prcmu: prcmu@80157000 { compatible = "stericsson,db8500-prcmu"; - reg = <0x80157000 0x1000>; - reg-names = "prcmu"; + reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; + reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; interrupts = <0 47 0x4>; #address-cells = <1>; #size-cells = <1>; @@ -319,9 +319,8 @@ }; }; - ab8500@5 { + ab8500 { compatible = "stericsson,ab8500"; - reg = <5>; /* mailbox 5 is i2c */ interrupt-parent = <&intc>; interrupts = <0 40 0x4>; interrupt-controller; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 67dbe20868a2..f7509cafc377 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -197,6 +197,11 @@ status = "disabled"; }; + rtc@d8500 { + compatible = "marvell,orion-rtc"; + reg = <0xd8500 0x20>; + }; + crypto: crypto@30000 { compatible = "marvell,orion-crypto"; reg = <0x30000 0x10000>, diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index e1347fceb5bc..1a62bcf18aa3 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -275,18 +275,27 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0x12680000 0x1000>; interrupts = <0 35 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@12690000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12690000 0x1000>; interrupts = <0 36 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; mdma1: mdma@12850000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x12850000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <1>; }; }; }; diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 5f3562ad6746..9a99755920c0 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi @@ -142,12 +142,18 @@ compatible 
= "arm,pl330", "arm,primecell"; reg = <0x120000 0x1000>; interrupts = <0 34 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; pdma1: pdma@121B0000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x121000 0x1000>; interrupts = <0 35 0>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; }; diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi index 592fb9dc35bd..379128eb9d98 100644 --- a/arch/arm/boot/dts/href.dtsi +++ b/arch/arm/boot/dts/href.dtsi @@ -221,7 +221,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts index 55f4191a626e..2b587a74b813 100644 --- a/arch/arm/boot/dts/hrefv60plus.dts +++ b/arch/arm/boot/dts/hrefv60plus.dts @@ -158,7 +158,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17c3a29..fd36e1cca104 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -152,7 +152,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8aa7fff..6c6a5442800a 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -70,7 +70,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; rtc: rtc@51 { diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts index e54fffd48369..468c0a1d48d9 100644 --- a/arch/arm/boot/dts/imx53-mba53.dts +++ b/arch/arm/boot/dts/imx53-mba53.dts @@ -42,10 +42,9 @@ fsl,pins = <689 0x10000 /* DISP1_DRDY */ 482 0x10000 /* DISP1_HSYNC */ 489 0x10000 /* DISP1_VSYNC */ - 684 0x10000 /* DISP1_DAT_0 */ 515 0x10000 /* DISP1_DAT_22 */ 523 0x10000 /* DISP1_DAT_23 */ - 543 0x10000 /* DISP1_DAT_21 */ + 545 0x10000 /* DISP1_DAT_21 */ 553 0x10000 /* DISP1_DAT_20 */ 558 0x10000 /* DISP1_DAT_19 */ 564 0x10000 /* DISP1_DAT_18 */ diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b4581..281a223591ff 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -91,6 +91,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x00a00600 0x20>; interrupts = <1 13 0xf01>; + clocks = <&clks 15>; }; L2: l2-cache@00a02000 { diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts index 5bb0bf39d3b8..c9c44b2f62d7 100644 --- a/arch/arm/boot/dts/kirkwood-dns320.dts +++ b/arch/arm/boot/dts/kirkwood-dns320.dts @@ -42,12 +42,10 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; serial@12100 { - clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts index d430713ea9b9..e4e4930dc5cf 100644 --- a/arch/arm/boot/dts/kirkwood-dns325.dts +++ b/arch/arm/boot/dts/kirkwood-dns325.dts @@ -50,7 +50,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts index 2e3dd34e21a5..0196cf6b0ef2 100644 --- a/arch/arm/boot/dts/kirkwood-dockstar.dts +++ 
b/arch/arm/boot/dts/kirkwood-dockstar.dts @@ -37,7 +37,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts index ef2d8c705709..289e51d86372 100644 --- a/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -38,7 +38,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index 1b133e0c566e..c3573be7b92c 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -73,11 +73,11 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; nand@3000000 { + chip-delay = <40>; status = "okay"; partition@0 { diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index 71902da33d63..5335b1aa8601 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -51,7 +51,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts index 504f16be8b54..12ccf74ac3c4 100644 --- a/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -78,7 +78,6 @@ }; }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 6cae4599c4b3..3694e94f6e99 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@ marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { - marvell,pins = "mpp44"; + marvell,pins = "mpp46"; marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { - marvell,pins = "mpp45"; + marvell,pins = "mpp47"; marvell,function = "gpio"; }; @@ -115,7 +115,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; @@ -158,14 +157,14 @@ gpios = <&gpio0 16 0>; linux,default-trigger = "default-on"; }; - health_led1 { + rebuild_led { + label = "status:white:rebuild_led"; + gpios = <&gpio1 4 0>; + }; + health_led { label = "status:red:health_led"; gpios = <&gpio1 5 0>; }; - health_led2 { - label = "status:white:health_led"; - gpios = <&gpio1 4 0>; - }; backup_led { label = "status:blue:backup_led"; gpios = <&gpio0 15 0>; diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts index 8db3123ac80f..5bbd0542cdd3 100644 --- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts +++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts @@ -34,7 +34,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts index 9510c9ea666c..9f55d95f35f5 100644 --- a/arch/arm/boot/dts/kirkwood-lschlv2.dts +++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts @@ -13,7 +13,6 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts index 739019c4cba9..5c84c118ed8d 100644 --- a/arch/arm/boot/dts/kirkwood-lsxhl.dts +++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts @@ -13,7 +13,6 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <200000000>; status = "okay"; }; }; diff --git 
a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts index 662dfd81b1ce..758824118a9a 100644 --- a/arch/arm/boot/dts/kirkwood-mplcec4.dts +++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts @@ -90,7 +90,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi index e8e7ecef1650..6affd924fe11 100644 --- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi +++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi @@ -23,7 +23,6 @@ }; serial@12000 { - clock-frequency = <166666667>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts index 3a178cf708d7..a7412b937a8a 100644 --- a/arch/arm/boot/dts/kirkwood-nsa310.dts +++ b/arch/arm/boot/dts/kirkwood-nsa310.dts @@ -117,7 +117,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts index ede7fe0d7a87..d27f7245f8e7 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts @@ -18,12 +18,10 @@ ocp@f1000000 { serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; serial@12100 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts index 842ff95d60df..66eb45b00b25 100644 --- a/arch/arm/boot/dts/kirkwood-topkick.dts +++ b/arch/arm/boot/dts/kirkwood-topkick.dts @@ -108,7 +108,6 @@ }; serial@12000 { - clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index 2c738d9dc82a..fada7e6d24d8 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -38,6 +38,7 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <35>, <36>, <37>, <38>; + clocks = <&gate_clk 7>; }; gpio1: gpio@10140 { @@ -49,6 +50,7 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <39>, <40>, <41>; + clocks = <&gate_clk 7>; }; serial@12000 { @@ -57,7 +59,6 @@ reg-shift = <2>; interrupts = <33>; clocks = <&gate_clk 7>; - /* set clock-frequency in board dts */ status = "disabled"; }; @@ -67,7 +68,6 @@ reg-shift = <2>; interrupts = <34>; clocks = <&gate_clk 7>; - /* set clock-frequency in board dts */ status = "disabled"; }; @@ -75,6 +75,7 @@ compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc"; reg = <0x10300 0x20>; interrupts = <53>; + clocks = <&gate_clk 7>; }; spi@10600 { diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts index 5a3a58b7e18f..0077fc8510b7 100644 --- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts +++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts @@ -11,7 +11,7 @@ / { model = "LaCie Ethernet Disk mini V2"; - compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x"; + compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { reg = <0x00000000 0x4000000>; /* 64 MB */ diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f81ed9..f7bec3b1ba32 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -13,6 +13,9 @@ compatible = "marvell,orion5x"; interrupt-parent = <&intc>; + aliases { + gpio0 = &gpio0; + }; intc: interrupt-controller { compatible = "marvell,orion-intc", 
"marvell,intc"; interrupt-controller; @@ -32,7 +35,9 @@ #gpio-cells = <2>; gpio-controller; reg = <0x10100 0x40>; - ngpio = <32>; + ngpios = <32>; + interrupt-controller; + #interrupt-cells = <2>; interrupts = <6>, <7>, <8>, <9>; }; @@ -91,7 +96,7 @@ reg = <0x90000 0x10000>, <0xf2200000 0x800>; reg-names = "regs", "sram"; - interrupts = <22>; + interrupts = <28>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts index 27f31a5fa494..d3ec32f6b790 100644 --- a/arch/arm/boot/dts/snowball.dts +++ b/arch/arm/boot/dts/snowball.dts @@ -298,7 +298,7 @@ }; }; - ab8500@5 { + ab8500 { ab8500-regulators { ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { regulator-name = "V-DISPLAY"; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 936d2306e7e1..7e8769bd5977 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -75,6 +75,9 @@ compatible = "arm,pl330", "arm,primecell"; reg = <0xffe01000 0x1000>; interrupts = <0 180 4>; + #dma-cells = <1>; + #dma-channels = <8>; + #dma-requests = <32>; }; }; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 9a428931d042..3d3f64d2111a 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -118,6 +118,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x50040600 0x20>; interrupts = <1 13 0x304>; + clocks = <&tegra_car 132>; }; intc: interrupt-controller { @@ -384,7 +385,7 @@ spi@7000d800 { compatible = "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index 767803e1fd55..dbf46c272562 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -119,6 +119,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x50040600 0x20>; interrupts = <1 13 0xf04>; + clocks = <&tegra_car 214>; }; intc: interrupt-controller { @@ -371,7 +372,7 @@ spi@7000d800 { compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; - reg = <0x7000d480 0x200>; + reg = <0x7000d800 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index fbbc5bb022d5..6a99e30f81d2 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -116,6 +116,7 @@ CONFIG_SND_SOC=y CONFIG_SND_MXS_SOC=y CONFIG_SND_SOC_MXS_SGTL5000=y CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_STORAGE=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b16bae2c9a60..bd07864f14a0 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -126,6 +126,8 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_TWL4030_PWRBUTTON=y CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=32 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a81..dff714d886d5 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; + unsigned long ticks_per_jiffy; } 
arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index cca9f15704ed..ea289e1435e7 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -19,14 +19,6 @@ #undef _CACHE #undef MULTI_CACHE -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - #if defined(CONFIG_CPU_CACHE_V4) # ifdef _CACHE # define MULTI_CACHE 1 diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 02fe2fbe2477..ed94b1a366ae 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484d..91b99abe7a95 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); #endif #endif +/* + * Needed to be able to broadcast the TLB invalidation for kmap. + */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + #ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); #else diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323c..a7b85e0d0cc1 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +DECLARE_PER_CPU(atomic64_t, active_asids); + #else /* !CONFIG_CPU_HAS_ASID */ #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64c..86b8fe398b95 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -111,7 +111,7 @@ #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. 
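The pgtable-3level.h change corrects the stage-2 access-permission encoding: HAP[2:1] is a two-bit field where 0b01 grants read-only and 0b11 grants read/write access (0b10 is treated as write-only), so a read/write mapping needs the value 3 at bit 6 rather than 2. A small illustrative check with hypothetical macro names:

#include <linux/bug.h>
#include <linux/types.h>

typedef u64 example_pteval_t;	/* stand-in for pteval_t under LPAE */

#define S2_HAP_READ	((example_pteval_t)1 << 6)	/* HAP[1]: read  */
#define S2_HAP_WRITE	((example_pteval_t)1 << 7)	/* HAP[2]: write */

#define S2_RDONLY	S2_HAP_READ
#define S2_RDWR		(S2_HAP_READ | S2_HAP_WRITE)

static inline void s2_hap_sanity_check(void)
{
	/* read/write must set both permission bits: (3 << 6), not (2 << 6) */
	BUILD_BUG_ON(S2_RDWR != ((example_pteval_t)3 << 6));
	BUILD_BUG_ON(S2_RDONLY != ((example_pteval_t)1 << 6));
}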
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..ab865e65a84c 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -14,7 +14,6 @@ #include <asm/glue.h> -#define TLB_V3_PAGE (1 << 0) #define TLB_V4_U_PAGE (1 << 1) #define TLB_V4_D_PAGE (1 << 2) #define TLB_V4_I_PAGE (1 << 3) @@ -22,7 +21,6 @@ #define TLB_V6_D_PAGE (1 << 5) #define TLB_V6_I_PAGE (1 << 6) -#define TLB_V3_FULL (1 << 8) #define TLB_V4_U_FULL (1 << 9) #define TLB_V4_D_FULL (1 << 10) #define TLB_V4_I_FULL (1 << 11) @@ -52,7 +50,6 @@ * ============= * * We have the following to choose from: - * v3 - ARMv3 * v4 - ARMv4 without write buffer * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction @@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); @@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { + if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { - tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); @@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && + if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); @@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_WB)) dsb(); - tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); @@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void) isb(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static inline void dummy_flush_tlb_a15_erratum(void) +{ + /* + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. + */ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(); +} +#else +static inline void dummy_flush_tlb_a15_erratum(void) +{ +} +#endif + /* * flush_pmd_entry * diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 5c27696de14f..8b1f37bfeeec 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -2,6 +2,7 @@ #define _ASM_ARM_XEN_EVENTS_H #include <asm/ptrace.h> +#include <asm/atomic.h> enum ipi_vector { XEN_PLACEHOLDER_VECTOR, @@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->ARM_cpsr); } -/* - * We cannot use xchg because it does not support 8-byte - * values. However it is safe to use {ldr,dtd}exd directly because all - * platforms which Xen can run on support those instructions. 
- */ -static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val) -{ - xen_ulong_t oldval; - unsigned int tmp; - - wmb(); - asm volatile("@ xchg_xen_ulong\n" - "1: ldrexd %0, %H0, [%3]\n" - " strexd %1, %2, %H2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (oldval), "=&r" (tmp) - : "r" (val), "r" (ptr) - : "memory", "cc"); - return oldval; -} +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ + atomic64_t, \ + counter), (val)) #endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde504ed..fefd7f971437 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) */ .macro mcount_enter +/* + * This pad compensates for the push {lr} at the call site. Note that we are + * unable to unwind through a function which does not otherwise save its lr. + */ + UNWIND(.pad #4) stmdb sp!, {r0-r3, lr} + UNWIND(.save {r0-r3, lr}) .endm .macro mcount_get_lr reg @@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) .endm ENTRY(__gnu_mcount_nc) +UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE mov ip, lr ldmia sp!, {lr} @@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) #else __mcount #endif +UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) +UNWIND(.fnstart) __ftrace_caller +UNWIND(.fnend) ENDPROC(ftrace_caller) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) +UNWIND(.fnstart) __ftrace_graph_caller +UNWIND(.fnend) ENDPROC(ftrace_graph_caller) #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae77..8bac553fe213 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -267,7 +267,7 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) +#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) sub r4, r4, #4 @ Fixup page table pointer @ for 64-bit descriptors #endif diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab90..1fd749ee4a1b 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) } if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); + pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } @@ -987,7 +987,7 @@ clear_vcr: isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to disable vector catch\n", cpu); + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } @@ -1007,7 +1007,7 @@ clear_vcr: } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); + pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return; } @@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +static struct notifier_block dbg_cpu_pm_nb = { .notifier_call = dbg_cpu_pm_notify, }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 146157dfe27c..8c3094d0f7b7 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu 
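The events.h change replaces the hand-rolled exclusive load/store sequence with atomic64_xchg(), using container_of() to view the 64-bit word as the counter member of an atomic64_t. A stripped-down sketch of the same idiom, assuming the target word has the size and alignment of atomic64_t's counter:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/types.h>

typedef u64 example_ulong_t;	/* stand-in for xen_ulong_t on 32-bit ARM */

static inline example_ulong_t example_xchg(example_ulong_t *ptr,
					   example_ulong_t val)
{
	/*
	 * atomic64_t wraps a single 64-bit counter, so a suitably aligned
	 * u64 can be re-typed with container_of() and exchanged through the
	 * generic helper instead of open-coding ldrexd/strexd loops.
	 */
	return atomic64_xchg(container_of(ptr, atomic64_t, counter), val);
}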
*leader_pmu = event->group_leader->pmu; - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index bd6f56b9ec21..59d2adb764a9 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3eda..234e339196c0 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -56,7 +56,6 @@ #include <asm/virt.h> #include "atags.h" -#include "tcm.h" #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) @@ -353,6 +352,23 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +static void __init cpuid_init_hwcaps(void) +{ + unsigned int divide_instrs; + + if (cpu_architecture() < CPU_ARCH_ARMv7) + return; + + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; + + switch (divide_instrs) { + case 2: + elf_hwcap |= HWCAP_IDIVA; + case 1: + elf_hwcap |= HWCAP_IDIVT; + } +} + static void __init feat_v6_fixup(void) { int id = read_cpuid_id(); @@ -483,8 +499,11 @@ static void __init setup_processor(void) snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; + + cpuid_init_hwcaps(); + #ifndef CONFIG_ARM_THUMB - elf_hwcap &= ~HWCAP_THUMB; + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif feat_v6_fixup(); @@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); -#ifndef CONFIG_LPAE +#ifndef CONFIG_ARM_LPAE if (bank->start + size < bank->start) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); @@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p) reserve_crashkernel(); - tcm_init(); - #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 31644f1978d5..1f2ccccaf009 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_DUMMY; - evt->rating = 400; + evt->rating = 100; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; @@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd0300531399..e82e1d248772 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -12,6 +12,7 @@ #include <asm/smp_plat.h> 
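The setup.c hunk derives HWCAP_IDIVA/HWCAP_IDIVT from the ID_ISAR0 divide field so userspace can discover hardware integer-divide support through the ELF auxiliary vector. A small user-space probe, assuming an ARM toolchain with the uapi hwcap header and glibc 2.16+ for getauxval():

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP_IDIVA, HWCAP_IDIVT */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("sdiv/udiv in ARM state:   %s\n",
	       (hwcap & HWCAP_IDIVA) ? "yes" : "no");
	printf("sdiv/udiv in Thumb state: %s\n",
	       (hwcap & HWCAP_IDIVT) ? "yes" : "no");
	return 0;
}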
#include <asm/tlbflush.h> +#include <asm/mmu_context.h> /**********************************************************************/ @@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } +#ifdef CONFIG_ARM_ERRATA_798181 +static int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} +#else +static int erratum_a15_798181(void) +{ + return 0; +} +#endif + +static void ipi_flush_tlb_a15_erratum(void *arg) +{ + dmb(); +} + +static void broadcast_tlb_a15_erratum(void) +{ + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, + NULL, 1); +} + +static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) +{ + int cpu; + cpumask_t mask = { CPU_BITS_NONE }; + + if (!erratum_a15_798181()) + return; + + dummy_flush_tlb_a15_erratum(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + /* + * We only need to send an IPI if the other CPUs are running + * the same ASID as the one being invalidated. There is no + * need for locking around the active_asids check since the + * switch_mm() function has at least one dmb() (as required by + * this workaround) in case a context switch happens on + * another CPU after the condition below. + */ + if (atomic64_read(&mm->context.id) == + atomic64_read(&per_cpu(active_asids, cpu))) + cpumask_set_cpu(cpu, &mask); + } + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); +} + void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else local_flush_tlb_all(); + broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) @@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else local_flush_tlb_mm(mm); + broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) @@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) &ta, 1); } else local_flush_tlb_page(vma, uaddr); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) @@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else local_flush_tlb_kernel_page(kaddr); + broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, @@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, &ta, 1); } else local_flush_tlb_range(vma, start, end); + broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) @@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); + broadcast_tlb_a15_erratum(); } void flush_bp_all(void) diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30ae6bb4a310..f50f19e5c138 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -17,7 +17,6 @@ #include <asm/mach/map.h> #include <asm/memory.h> #include <asm/system_info.h> -#include "tcm.h" static struct gen_pool *tcm_pool; static bool dtcm_present; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a936988eb24..c1fe498983ac 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -201,6 +201,7 @@ int 
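The erratum_a15_798181() helper above encodes "Cortex-A15 r0p0..r3p2" as a single mask/compare on MIDR, where the variant (rN) sits in bits [23:20] and the revision (pM) in bits [3:0]. The same test written out long-hand, as a sketch:

#include <linux/types.h>

static bool is_cortex_a15_r0p0_to_r3p2(u32 midr)
{
	u32 variant  = (midr >> 20) & 0xf;	/* rN */
	u32 revision = midr & 0xf;		/* pM */

	/* implementer 0x41 (ARM), part number 0xC0F (Cortex-A15) */
	if ((midr & 0xff0ffff0) != 0x410fc0f0)
		return false;

	/* r0p0 .. r3p2 inclusive, i.e. midr <= 0x413fc0f2 */
	return variant < 3 || (variant == 3 && revision <= 2);
}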
kvm_dev_ioctl_check_extension(long ext) break; case KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; + break; case KVM_CAP_NR_VCPUS: r = num_online_cpus(); break; diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a982269c..7bed7556077a 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, u32 val; int cpu; - cpu = get_cpu(); - if (!p->is_write) return read_from_write_only(vcpu, p); + cpu = get_cpu(); + cpumask_setall(&vcpu->arch.require_dcache_flush); cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c @@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; - - goto out; + return true; } /* Try to use another LR for this interrupt */ @@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); -out: if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); - /* - * We do not need to take the distributor lock here, since the only - * action we perform is clearing the irq_active_bit for an EOIed - * level interrupt. There is a potential race with - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we - * check if the interrupt is already active. Two possibilities: - * - * - The queuing is occurring on the same vcpu: cannot happen, - * as we're already in the context of this vcpu, and - * executing the handler - * - The interrupt has been migrated to another vcpu, and we - * ignore this interrupt for this run. Big deal. It is still - * pending though, and will get considered when this vcpu - * exits. - */ if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { /* * Some level interrupts have been EOIed. Clear their @@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } else { vgic_cpu_irq_clear(vcpu, irq); } + + /* + * Despite being EOIed, the LR may not have + * been marked as empty. + */ + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; } } @@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) } /* - * Sync back the VGIC state after a guest run. We do not really touch - * the distributor here (the irq_pending_on_cpu bit is safe to set), - * so there is no need for taking its lock. + * Sync back the VGIC state after a guest run. The distributor lock is + * needed so we don't get preempted in the middle of the state processing. 
*/ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { @@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) { + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + if (!irqchip_in_kernel(vcpu->kvm)) return; + spin_lock(&dist->lock); __kvm_vgic_sync_hwstate(vcpu); + spin_unlock(&dist->lock); } int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) static void __timer_const_udelay(unsigned long xloops) { unsigned long long loops = xloops; - loops *= loops_per_jiffy; + loops *= arm_delay_ops.ticks_per_jiffy; __timer_delay(loops >> UDELAY_SHIFT); } @@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) pr_info("Switching to timer-based delay loop\n"); delay_timer = timer; lpj_fine = timer->freq / HZ; - loops_per_jiffy = lpj_fine; + + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ + arm_delay_ops.ticks_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; + delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index d912e7397ecc..94b0650ea98f 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -14,31 +14,15 @@ .text .align 5 - .word 0 - -1: subs r2, r2, #4 @ 1 do we have enough - blt 5f @ 1 bytes to align with? - cmp r3, #2 @ 1 - strltb r1, [ip], #1 @ 1 - strleb r1, [ip], #1 @ 1 - strb r1, [ip], #1 @ 1 - add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) -/* - * The pointer is now aligned and the length is adjusted. Try doing the - * memset again. - */ ENTRY(memset) -/* - * Preserve the contents of r0 for the return value. - */ - mov ip, r0 - ands r3, ip, #3 @ 1 unaligned? - bne 1b @ 1 + ands r3, r0, #3 @ 1 unaligned? + mov ip, r0 @ preserve r0 as return value + bne 6f @ 1 /* * we know that the pointer in ip is aligned to a word boundary. */ - orr r1, r1, r1, lsl #8 +1: orr r1, r1, r1, lsl #8 orr r1, r1, r1, lsl #16 mov r3, r1 cmp r2, #16 @@ -127,4 +111,13 @@ ENTRY(memset) tst r2, #1 strneb r1, [ip], #1 mov pc, lr + +6: subs r2, r2, #4 @ 1 do we have enough + blt 5b @ 1 bytes to align with? 
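The delay.c change gives the timer-based delay path its own conversion factor: cpufreq rescales loops_per_jiffy when the CPU clock changes, which is correct for the software busy-wait loop but wrong for a hardware timer that keeps ticking at a fixed rate. A simplified illustration of the conversion the timer path performs (names and the open-coded divide are illustrative; the real code folds this into UDELAY_SHIFT arithmetic):

#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Fixed conversion factor for the hardware-timer delay path. */
static unsigned long timer_ticks_per_jiffy;

static u64 usecs_to_timer_ticks(unsigned long usecs)
{
	/*
	 * ticks = usecs * ticks_per_jiffy * HZ / 10^6, done in 64 bits to
	 * avoid overflow.  Crucially this uses the private copy above and
	 * not loops_per_jiffy, which cpufreq scales behind our back.
	 */
	return div_u64((u64)usecs * timer_ticks_per_jiffy * HZ, 1000000);
}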
+ cmp r3, #2 @ 1 + strltb r1, [ip], #1 @ 1 + strleb r1, [ip], #1 @ 1 + strb r1, [ip], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) + b 1b ENDPROC(memset) diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c index 2ea7059b840b..c20a870ea9c9 100644 --- a/arch/arm/mach-at91/board-foxg20.c +++ b/arch/arm/mach-at91/board-foxg20.c @@ -176,6 +176,7 @@ static struct w1_gpio_platform_data w1_gpio_pdata = { /* If you choose to use a pin other than PB16 it needs to be 3.3V */ .pin = AT91_PIN_PB16, .is_open_drain = 1, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c index a033b8df9fb2..869cbecf00b7 100644 --- a/arch/arm/mach-at91/board-stamp9g20.c +++ b/arch/arm/mach-at91/board-stamp9g20.c @@ -188,6 +188,7 @@ static struct spi_board_info portuxg20_spi_devices[] = { static struct w1_gpio_platform_data w1_gpio_pdata = { .pin = AT91_PIN_PA29, .is_open_drain = 1, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h index eed465ab0dd7..5fc23771c154 100644 --- a/arch/arm/mach-at91/include/mach/gpio.h +++ b/arch/arm/mach-at91/include/mach/gpio.h @@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin); extern void at91_gpio_suspend(void); extern void at91_gpio_resume(void); +#ifdef CONFIG_PINCTRL_AT91 +extern void at91_pinctrl_gpio_suspend(void); +extern void at91_pinctrl_gpio_resume(void); +#else +static inline void at91_pinctrl_gpio_suspend(void) {} +static inline void at91_pinctrl_gpio_resume(void) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c index 8e210262aeee..e0ca59171022 100644 --- a/arch/arm/mach-at91/irq.c +++ b/arch/arm/mach-at91/irq.c @@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value) void at91_irq_suspend(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable enabled irqs */ - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable wakeup irqs */ - i = 0; - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *backups); @@ -118,23 +116,21 @@ void at91_irq_suspend(void) void at91_irq_resume(void) { - int i = 0, bit; + int bit = -1; if (has_aic5()) { /* disable wakeup irqs */ - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IDCR, 1); - i = bit; } /* enable irqs disabled for suspend */ - i = 0; - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { + bit = -1; + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { at91_aic_write(AT91_AIC5_SSR, bit & AT91_AIC5_INTSEL_MSK); at91_aic_write(AT91_AIC5_IECR, 1); - i = bit; } } else { at91_aic_write(AT91_AIC_IDCR, *wakeups); diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index adb6db888a1f..73f1f250403a 100644 --- a/arch/arm/mach-at91/pm.c 
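The at91 irq.c rework fixes a stuck scan: restarting find_next_bit() at the bit it just returned yields the same bit forever, so each iteration must resume at bit + 1. The kernel's for_each_set_bit() helper encapsulates exactly that; an equivalent generic sketch:

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_walk_bitmap(const unsigned long *map, unsigned int nbits)
{
	unsigned int bit;

	/*
	 * for_each_set_bit() advances from bit + 1 internally, so it never
	 * revisits the bit it just reported, unlike the open-coded loops
	 * being fixed above.
	 */
	for_each_set_bit(bit, map, nbits)
		pr_info("bit %u is set\n", bit);
}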
+++ b/arch/arm/mach-at91/pm.c @@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz; static int at91_pm_enter(suspend_state_t state) { - at91_gpio_suspend(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_suspend(); + else + at91_gpio_suspend(); at91_irq_suspend(); pr_debug("AT91: PM - wake mask %08x, pm state %d\n", @@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state) error: target_state = PM_SUSPEND_ON; at91_irq_resume(); - at91_gpio_resume(); + if (of_have_populated_dt()) + at91_pinctrl_gpio_resume(); + else + at91_gpio_resume(); return 0; } diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..52e4bb5cf12d 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,19 +22,9 @@ static struct map_desc cns3xxx_io_desc[] __initdata = { { - .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), - .length = SZ_4K, + .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), + .length = SZ_8K, .type = MT_DEVICE, }, { .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f289..b1021aafa481 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@ #define RTC_INTR_STS_OFFSET 0x34 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 +#define CNS3XXX_PM_BASE_VIRT 0xFB001000 #define PM_CLK_GATE_OFFSET 0x00 #define PM_SOFT_RST_OFFSET 0x04 @@ -109,7 +109,7 @@ #define PM_PLL_HM_PD_OFFSET 0x1C #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 +#define CNS3XXX_UART0_BASE_VIRT 0xFB002000 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 @@ -130,7 +130,7 @@ #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 #define TIMER1_COUNTER_OFFSET 0x00 #define TIMER1_AUTO_RELOAD_OFFSET 0x04 @@ -227,16 +227,16 @@ * Testchip peripheral and fpga gic regions */ #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 -#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 +#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) #define 
CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index a685e9706b7b..45b7c71d9cc1 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c @@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel); */ int edma_alloc_slot(unsigned ctlr, int slot) { + if (!edma_cc[ctlr]) + return -EINVAL; + if (slot >= 0) slot = EDMA_CHAN_SLOT(slot); diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82ab..b5cc77d2380b 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h @@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) static inline void putc(int c) { - /* Transmit fifo not full? */ - while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) - ; + int i; + + for (i = 0; i < 10000; i++) { + /* Transmit fifo not full? */ + if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) + break; + } __raw_writeb(c, PHYS_UART_DATA); } diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index abda5a18a664..0f2111a11315 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -67,6 +67,7 @@ config ARCH_NETWINDER select ISA select ISA_DMA select PCI + select VIRT_TO_BUS help Say Y here if you intend to run this kernel on the Rebel.COM NetWinder. Information about this machine can be found at: diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c52843396..890cae23c12a 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c @@ -28,13 +28,11 @@ extern void secondary_startup(void); */ void __ref highbank_cpu_die(unsigned int cpu) { - flush_cache_all(); - highbank_set_cpu_jump(cpu, phys_to_virt(0)); - highbank_set_core_pwr(); - cpu_do_idle(); + flush_cache_louis(); + highbank_set_core_pwr(); - /* We should never return from idle */ - panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); + while (1) + cpu_do_idle(); } diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index 74e3a34d78b8..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); + clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); @@ -264,6 +265,8 @@ int __init mx35_clocks_init(void) clk_prepare_enable(clk[gpio3_gate]); clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); + clk_prepare_enable(clk[max_gate]); + clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. 
The clock code diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 7b025ee528a5..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; -static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; +static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; @@ -172,7 +172,7 @@ static struct clk *clk[clk_max]; static struct clk_onecell_data clk_data; static enum mx6q_clks const clks_init_on[] __initconst = { - mmdc_ch0_axi, rom, + mmdc_ch0_axi, rom, pll1_sys, }; static struct clk_div_table clk_enet_ref_table[] = { @@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); - clk_register_clkdev(clk[twd], NULL, "smp_twd"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5b..5bf4a97ab241 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); extern void imx_enable_cpu(int cpu, bool enable); extern void imx_set_cpu_jump(int cpu, void *jump_addr); +extern u32 imx_get_cpu_arg(int cpu); +extern void imx_set_cpu_arg(int cpu, u32 arg); extern void v7_cpu_resume(void); extern u32 *pl310_get_save_ptr(void); #ifdef CONFIG_SMP diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S index 921fc1555854..a58c8b0527cc 100644 --- a/arch/arm/mach-imx/headsmp.S +++ b/arch/arm/mach-imx/headsmp.S @@ -26,16 +26,16 @@ ENDPROC(v7_secondary_startup) #ifdef CONFIG_PM /* - * The following code is located into the .data section. This is to - * allow phys_l2x0_saved_regs to be accessed with a relative load - * as we are running on physical address here. + * The following code must assume it is running from physical address + * where absolute virtual addresses to the data section have to be + * turned into relative ones. */ - .data - .align #ifdef CONFIG_CACHE_L2X0 .macro pl310_resume - ldr r2, phys_l2x0_saved_regs + adr r0, l2x0_saved_regs_offset + ldr r2, [r0] + add r2, r2, r0 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl @@ -43,9 +43,9 @@ ENDPROC(v7_secondary_startup) str r1, [r0, #L2X0_CTRL] @ re-enable L2 .endm - .globl phys_l2x0_saved_regs -phys_l2x0_saved_regs: - .long 0 +l2x0_saved_regs_offset: + .word l2x0_saved_regs - . 
+ #else .macro pl310_resume .endm diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..361a253e2b63 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); + /* + * We use the cpu jumping argument register to sync with + * imx_cpu_kill() which is running on cpu0 and waiting for + * the register being cleared to kill the cpu. + */ + imx_set_cpu_arg(cpu, ~0); cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + + while (imx_get_cpu_arg(cpu) == 0) + if (time_after(jiffies, timeout)) + return 0; imx_enable_cpu(cpu, false); + imx_set_cpu_arg(cpu, 0); return 1; } diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c index 03b65e5ea541..82348391582a 100644 --- a/arch/arm/mach-imx/imx25-dt.c +++ b/arch/arm/mach-imx/imx25-dt.c @@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = { NULL }; +static void __init imx25_timer_init(void) +{ + mx25_clocks_init_dt(); +} + DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") .map_io = mx25_map_io, .init_early = imx25_init_early, diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index ee42d20cba19..5faba7a3c95f 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c @@ -22,8 +22,6 @@ #include "common.h" #include "hardware.h" -extern unsigned long phys_l2x0_saved_regs; - static int imx6q_suspend_finish(unsigned long val) { cpu_do_idle(); @@ -57,18 +55,5 @@ static const struct platform_suspend_ops imx6q_pm_ops = { void __init imx6q_pm_init(void) { - /* - * The l2x0 core code provides an infrastucture to save and restore - * l2x0 registers across suspend/resume cycle. But because imx6q - * retains L2 content during suspend and needs to resume L2 before - * MMU is enabled, it can only utilize register saving support and - * have to take care of restoring on its own. So we save physical - * address of the data structure used by l2x0 core to save registers, - * and later restore the necessary ones in imx6q resume entry. 
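imx_cpu_kill() now waits for the dying CPU's handshake (the jump-argument register being set to ~0 in imx_cpu_die()) with a bounded poll. The jiffies/time_after() idiom used there is the standard way to put an upper bound on such busy-waits; a generic sketch with a hypothetical ready() predicate:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <asm/processor.h>

static int wait_for_condition(bool (*ready)(void), unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!ready()) {
		/* time_after() copes with jiffies wrap-around */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}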
- */ -#ifdef CONFIG_CACHE_L2X0 - phys_l2x0_saved_regs = __pa(&l2x0_saved_regs); -#endif - suspend_set_ops(&imx6q_pm_ops); } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59b..09a742f8c7ab 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) src_base + SRC_GPR1 + cpu * 8); } +u32 imx_get_cpu_arg(int cpu) +{ + cpu = cpu_logical_map(cpu); + return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); +} + +void imx_set_cpu_arg(int cpu, u32 arg) +{ + cpu = cpu_logical_map(cpu); + writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); +} + void imx_src_prepare_restart(void) { u32 val; diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c index d42730a1d4ab..d599e354ca57 100644 --- a/arch/arm/mach-ixp4xx/vulcan-setup.c +++ b/arch/arm/mach-ixp4xx/vulcan-setup.c @@ -163,6 +163,7 @@ static struct platform_device vulcan_max6369 = { static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = { .pin = 14, + .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device vulcan_w1_gpio = { diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 2e73e9d53f70..d367aa6b47bb 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -41,16 +41,12 @@ static void __init kirkwood_legacy_clk_init(void) struct device_node *np = of_find_compatible_node( NULL, NULL, "marvell,kirkwood-gating-clock"); - struct of_phandle_args clkspec; + struct clk *clk; clkspec.np = np; clkspec.args_count = 1; - clkspec.args[0] = CGC_BIT_GE0; - orion_clkdev_add(NULL, "mv643xx_eth_port.0", - of_clk_get_from_provider(&clkspec)); - clkspec.args[0] = CGC_BIT_PEX0; orion_clkdev_add("0", "pcie", of_clk_get_from_provider(&clkspec)); @@ -59,9 +55,24 @@ static void __init kirkwood_legacy_clk_init(void) orion_clkdev_add("1", "pcie", of_clk_get_from_provider(&clkspec)); - clkspec.args[0] = CGC_BIT_GE1; - orion_clkdev_add(NULL, "mv643xx_eth_port.1", + clkspec.args[0] = CGC_BIT_SDIO; + orion_clkdev_add(NULL, "mvsdio", of_clk_get_from_provider(&clkspec)); + + /* + * The ethernet interfaces forget the MAC address assigned by + * u-boot if the clocks are turned off. Until proper DT support + * is available we always enable them for now. + */ + clkspec.args[0] = CGC_BIT_GE0; + clk = of_clk_get_from_provider(&clkspec); + orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk); + clk_prepare_enable(clk); + + clkspec.args[0] = CGC_BIT_GE1; + clk = of_clk_get_from_provider(&clkspec); + orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk); + clk_prepare_enable(clk); } static void __init kirkwood_of_clk_init(void) diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0e..e5f70415905a 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c @@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { .duplex = DUPLEX_FULL, }; +static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(11), +}; + void __init iomega_ix2_200_init(void) { /* * Basic setup. Needs to be called early. 
*/ - kirkwood_ge01_init(&iomega_ix2_200_ge00_data); + kirkwood_ge00_init(&iomega_ix2_200_ge00_data); + kirkwood_ge01_init(&iomega_ix2_200_ge01_data); } diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf8..08dd739aa709 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c @@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { static struct mvsdio_platform_data guruplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ + .gpio_card_detect = -1, + .gpio_write_protect = -1, }; static struct gpio_led guruplug_led_pins[] = { diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc937..6a6eb548307d 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c @@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { static struct mvsdio_platform_data openrd_mvsdio_data = { .gpio_card_detect = 29, /* MPP29 used as SD card detect */ + .gpio_write_protect = -1, }; static unsigned int openrd_mpp_config[] __initdata = { diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926b..d24223166e06 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { static struct mvsdio_platform_data rd88f6281_mvsdio_data = { .gpio_card_detect = 28, + .gpio_write_protect = -1, }; static unsigned int rd88f6281_mpp_config[] __initdata = { diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index d1e2d595e79c..f62b68d926f4 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> +#include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02fa..f9fd77e8f1f5 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, { u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); - writel_relaxed(0, event_base + TIMER_CLEAR); + ctrl &= ~TIMER_ENABLE_EN; + writel_relaxed(ctrl, event_base + TIMER_ENABLE); + + writel_relaxed(ctrl, event_base + TIMER_CLEAR); writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); return 0; diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271de..d5970f5a1e8d 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c @@ -44,6 +44,8 @@ #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) +#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) + #define ACTIVE_DOORBELLS (8) static DEFINE_RAW_SPINLOCK(irq_controller_lock); @@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain; */ static void armada_370_xp_irq_mask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); -#endif } static void 
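The mvsdio and w1-gpio platform-data fixes in this series (gpio_card_detect/gpio_write_protect = -1, ext_pullup_enable_pin = -EINVAL) follow the legacy GPIO convention that a negative number means "not wired": consumers are expected to check the number with gpio_is_valid() before requesting it, and a zero-initialised field would otherwise be mistaken for GPIO 0. A brief sketch of the consumer side, with illustrative names:

#include <linux/device.h>
#include <linux/gpio.h>

static int example_setup_card_detect(struct device *dev, int gpio_cd)
{
	/* A negative number (e.g. -1 or -EINVAL) means "no such pin". */
	if (!gpio_is_valid(gpio_cd))
		return 0;	/* optional pin not wired on this board */

	return devm_gpio_request_one(dev, gpio_cd, GPIOF_IN, "card detect");
}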
armada_370_xp_irq_unmask(struct irq_data *d) { -#ifdef CONFIG_SMP irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) writel(hwirq, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); else writel(hwirq, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#else - writel(irqd_to_hwirq(d), - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); -#endif } #ifdef CONFIG_SMP @@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { armada_370_xp_irq_mask(irq_get_irq_data(virq)); - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + writel(hw, per_cpu_int_base + + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); + else + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); - if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { irq_set_percpu_devid(virq); irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_percpu_devid_irq); diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c index 8fb23af154b3..e26eeba46598 100644 --- a/arch/arm/mach-mxs/icoll.c +++ b/arch/arm/mach-mxs/icoll.c @@ -100,7 +100,7 @@ static struct irq_domain_ops icoll_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -void __init icoll_of_init(struct device_node *np, +static void __init icoll_of_init(struct device_node *np, struct device_node *interrupt_parent) { /* diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 052186713347..e7b781d3788f 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = { .lower_margin = 4, .hsync_len = 1, .vsync_len = 1, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = { .lower_margin = 10, .hsync_len = 10, .vsync_len = 10, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, }, }; @@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = { .lower_margin = 45, .hsync_len = 1, .vsync_len = 1, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT, }, }; @@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = { .lower_margin = 13, .hsync_len = 48, .vsync_len = 3, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = { .lower_margin = 0x15, .hsync_len = 64, .vsync_len = 4, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | - FB_SYNC_DATA_ENABLE_HIGH_ACT | - FB_SYNC_DOTCLK_FAILING_ACT, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; @@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = { .lower_margin = 2, .hsync_len = 15, .vsync_len = 15, - .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT }, }; @@ -259,6 +249,8 @@ static void __init imx23_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static inline void enable_clk_enet_out(void) @@ -278,6 +270,8 @@ static void __init imx28_evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); 
mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } @@ -297,6 +291,7 @@ static void __init m28evk_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init sc_sps1_init(void) @@ -322,6 +317,8 @@ static void __init apx4devkit_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); mxsfb_pdata.default_bpp = 32; mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } #define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) @@ -402,17 +399,18 @@ static void __init cfa10049_init(void) { enable_clk_enet_out(); update_fec_mac_prop(OUI_CRYSTALFONTZ); + + mxsfb_pdata.mode_list = cfa10049_video_modes; + mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); + mxsfb_pdata.default_bpp = 32; + mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; } static void __init cfa10037_init(void) { enable_clk_enet_out(); update_fec_mac_prop(OUI_CRYSTALFONTZ); - - mxsfb_pdata.mode_list = cfa10049_video_modes; - mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); - mxsfb_pdata.default_bpp = 32; - mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; } static void __init apf28_init(void) @@ -423,6 +421,8 @@ static void __init apf28_init(void) mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); mxsfb_pdata.default_bpp = 16; mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; + mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | + MXSFB_SYNC_DOTCLK_FAILING_ACT; } static void __init mxs_machine_init(void) diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c index a4294aa9f301..e63b7d87acbd 100644 --- a/arch/arm/mach-mxs/mm.c +++ b/arch/arm/mach-mxs/mm.c @@ -18,6 +18,7 @@ #include <mach/mx23.h> #include <mach/mx28.h> +#include <mach/common.h> /* * Define the MX23 memory map. 
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c index 54add60f94c9..1dff46703753 100644 --- a/arch/arm/mach-mxs/ocotp.c +++ b/arch/arm/mach-mxs/ocotp.c @@ -19,6 +19,7 @@ #include <asm/processor.h> /* for cpu_relax() */ #include <mach/mxs.h> +#include <mach/common.h> #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3fc..6c4f766365a2 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -543,15 +543,6 @@ static struct clk usb_dc_ck = { /* Direct from ULPD, no parent */ .rate = 48000000, .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), - .enable_bit = USB_REQ_EN_SHIFT, -}; - -static struct clk usb_dc_ck7xx = { - .name = "usb_dc_ck", - .ops = &clkops_generic, - /* Direct from ULPD, no parent */ - .rate = 48000000, - .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, }; @@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), - CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), + CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), CLK(NULL, "mclk", &mclk_16xx, CK_16XX), CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index fb18831e88aa..14f7e9920479 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -31,6 +31,8 @@ #include <plat/i2c.h> +#include <mach/irqs.h> + #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) void omap7xx_map_io(void); #else diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 49ac3dfebef9..8111cd9ff3e5 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -311,9 +311,6 @@ config MACH_OMAP_ZOOM2 default y select OMAP_PACKAGE_CBB select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SERIAL_8250 - select SERIAL_8250_CONSOLE - select SERIAL_CORE_CONSOLE config MACH_OMAP_ZOOM3 bool "OMAP3630 Zoom3 board" @@ -321,9 +318,6 @@ config MACH_OMAP_ZOOM3 default y select OMAP_PACKAGE_CBP select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SERIAL_8250 - select SERIAL_8250_CONSOLE - select SERIAL_CORE_CONSOLE config MACH_CM_T35 bool "CompuLab CM-T35/CM-T3730 modules" diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 0274ff7a2a2b..e54a48060198 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -102,6 +102,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)") .init_irq = omap_intc_of_init, .handle_irq = omap3_intc_handle_irq, .init_machine = omap_generic_init, + .init_late = omap3_init_late, .init_time = omap3_sync32k_timer_init, .dt_compat = omap3_boards_compat, .restart = omap3xxx_restart, @@ -119,6 +120,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)") .init_irq = omap_intc_of_init, .handle_irq = omap3_intc_handle_irq, .init_machine = omap_generic_init, + .init_late = omap3_init_late, .init_time = omap3_secure_sync32k_timer_init, .dt_compat = omap3_gp_boards_compat, .restart = omap3xxx_restart, diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index f7c4616cbb60..d2ea68ea678a 100644 --- 
a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c @@ -17,6 +17,7 @@ #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> +#include <linux/usb/phy.h> #include <linux/usb/musb.h> #include <linux/platform_data/spi-omap2-mcspi.h> @@ -98,6 +99,7 @@ static void __init rx51_init(void) sdrc_params = nokia_get_sdram_timings(); omap_sdrc_init(sdrc_params, sdrc_params); + usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); usb_musb_init(&musb_board_data); rx51_peripherals_init(); diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f173..0c6834ae1fc4 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -52,6 +52,13 @@ */ #define OMAP4_DPLL_ABE_DEFFREQ 98304000 +/* + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred + * locked frequency for the USB DPLL is 960MHz. + */ +#define OMAP4_DPLL_USB_DEFFREQ 960000000 + /* Root clocks */ DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); @@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); +DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, + OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, + OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); + DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); @@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), + CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), @@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void) if (rc) pr_err("%s: failed to configure ABE DPLL!\n", __func__); + /* + * Lock USB DPLL on OMAP4 devices so that the L3INIT power + * domain can transition to retention state when not in use. 
+ */ + rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); + if (rc) + pr_err("%s: failed to configure USB DPLL!\n", __func__); + return 0; } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 0a6b9c7a63da..d6ba13e1c540 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -108,7 +108,6 @@ void omap35xx_init_late(void); void omap3630_init_late(void); void am35xx_init_late(void); void ti81xx_init_late(void); -void omap4430_init_late(void); int omap2_common_pm_late_init(void); #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) @@ -294,5 +293,8 @@ extern void omap_reserve(void); struct omap_hwmod; extern int omap_dss_reset(struct omap_hwmod *); +/* SoC specific clock initializer */ +extern int (*omap_clk_init)(void); + #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index e4b16c8efe8b..410e1bac7815 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1122,9 +1122,6 @@ int gpmc_calc_timings(struct gpmc_timings *gpmc_t, /* TODO: remove, see function definition */ gpmc_convert_ps_to_ns(gpmc_t); - /* Now the GPMC is initialised, unreserve the chip-selects */ - gpmc_cs_map = 0; - return 0; } @@ -1383,6 +1380,9 @@ static int gpmc_probe(struct platform_device *pdev) if (IS_ERR_VALUE(gpmc_setup_irq())) dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); + /* Now the GPMC is initialised, unreserve the chip-selects */ + gpmc_cs_map = 0; + rc = gpmc_probe_dt(pdev); if (rc < 0) { clk_disable_unprepare(gpmc_l3_clk); diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387b..5c445ca1e271 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -55,6 +55,12 @@ #include "prm44xx.h" /* + * omap_clk_init: points to a function that does the SoC-specific + * clock initializations + */ +int (*omap_clk_init)(void); + +/* * The machine specific code may provide the extra mapping besides the * default mapping provided here. 
*/ @@ -397,7 +403,7 @@ void __init omap2420_init_early(void) omap242x_clockdomains_init(); omap2420_hwmod_init(); omap_hwmod_init_postsetup(); - omap2420_clk_init(); + omap_clk_init = omap2420_clk_init; } void __init omap2420_init_late(void) @@ -427,7 +433,7 @@ void __init omap2430_init_early(void) omap243x_clockdomains_init(); omap2430_hwmod_init(); omap_hwmod_init_postsetup(); - omap2430_clk_init(); + omap_clk_init = omap2430_clk_init; } void __init omap2430_init_late(void) @@ -462,7 +468,7 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3430_init_early(void) @@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap3xxx_clk_init(); + omap_clk_init = omap3xxx_clk_init; } void __init omap3_init_late(void) @@ -568,7 +574,7 @@ void __init am33xx_init_early(void) am33xx_clockdomains_init(); am33xx_hwmod_init(); omap_hwmod_init_postsetup(); - am33xx_clk_init(); + omap_clk_init = am33xx_clk_init; } #endif @@ -593,7 +599,7 @@ void __init omap4430_init_early(void) omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap4xxx_clk_init(); + omap_clk_init = omap4xxx_clk_init; } void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 6a217c98db54..f82cf878d6af 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -211,8 +211,6 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, return -EINVAL; } - pr_err("%s: Could not find signal %s\n", __func__, muxname); - return -ENODEV; } @@ -234,6 +232,8 @@ int __init omap_mux_get_by_name(const char *muxname, return mux_mode; } + pr_err("%s: Could not find signal %s\n", __func__, muxname); + return -ENODEV; } @@ -739,8 +739,9 @@ static void __init omap_mux_dbg_create_entry( list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; - (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, - m, &omap_mux_dbg_signal_fops); + (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO, + mux_dbg_dir, m, + &omap_mux_dbg_signal_fops); } } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2b..a202a4785104 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if (oh->flags & HWMOD_FORCE_MSTANDBY) { + idlemode = HWMOD_IDLEMODE_FORCE; + } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) @@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh) } if (sf & SYSC_HAS_MIDLEMODE) { - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { + if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || + (oh->flags & HWMOD_FORCE_MSTANDBY)) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608eda..d5dc935f6060 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { * * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out * of idle, rather than relying on module smart-idle - * HWMOD_SWSUP_MSTDBY: omap_hwmod code 
should manually bring module in and out - * of standby, rather than relying on module smart-standby + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and + * out of standby, rather than relying on module smart-standby * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for * SDRAM controller, etc. XXX probably belongs outside the main hwmod file * XXX Should be HWMOD_SETUP_NO_RESET @@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { * correctly, or this is being abused to deal with some PM latency * issues -- but we're currently suffering from a shortage of * folks who are able to track these issues down properly. + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device + * is kept in force-standby mode. Failing to do so causes PM problems + * with musb on OMAP3630 at least. Note that musb has a dedicated register + * to control MSTANDBY signal when MIDLEMODE is set to force-standby. */ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_16BIT_REG (1 << 8) #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) #define HWMOD_BLOCK_WFI (1 << 10) +#define HWMOD_FORCE_MSTANDBY (1 << 11) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952f..5112d04e7b79 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { * Erratum ID: i479 idle_req / idle_ack mechanism potentially * broken when autoidle is enabled * workaround is to disable the autoidle bit at module level. + * + * Enabling the device in any other MIDLEMODE setting but force-idle + * causes core_pwrdm not enter idle states at least on OMAP3630. + * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY + * signal when MIDLEMODE is set to force-idle. */ .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE - | HWMOD_SWSUP_MSTANDBY, + | HWMOD_FORCE_MSTANDBY, }; /* usb_otg_hs */ diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687c..eaba9dc91a0d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", .class = &omap44xx_ocp2scp_hwmod_class, .clkdm_name = "l3_init_clkdm", - .main_clk = "func_48m_fclk", + /* + * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP + * block as an "optional clock," and normally should never be + * specified as the main_clk for an OMAP IP block. However it + * turns out that this clock is actually the main clock for + * the ocp2scp_usb_phy IP block: + * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html + * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems + * to be the best workaround. 
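Taken together, the omap_hwmod.c hunks above change how the module-standby (MIDLEMODE) setting is chosen: on enable, the new HWMOD_FORCE_MSTANDBY flag now wins and selects force-standby, while on idle either HWMOD_FORCE_MSTANDBY or HWMOD_SWSUP_MSTANDBY forces standby. The stand-alone C sketch below condenses that decision logic so the effect of the new flag is easier to see; the flag values match the header, but the enum and function names are simplified stand-ins, not the kernel's own.

#include <stdio.h>

/* simplified stand-ins for the hwmod flags and idlemode values */
#define HWMOD_SWSUP_MSTANDBY  (1 << 1)
#define HWMOD_FORCE_MSTANDBY  (1 << 11)

enum idlemode { IDLEMODE_FORCE, IDLEMODE_NO, IDLEMODE_SMART };

/* mirrors _enable_sysc(): force-standby wins, then software-supervised */
static enum idlemode midlemode_on_enable(unsigned long flags)
{
        if (flags & HWMOD_FORCE_MSTANDBY)
                return IDLEMODE_FORCE;
        if (flags & HWMOD_SWSUP_MSTANDBY)
                return IDLEMODE_NO;
        return IDLEMODE_SMART;
}

/* mirrors _idle_sysc(): either flag now forces standby on idle */
static enum idlemode midlemode_on_idle(unsigned long flags)
{
        if (flags & (HWMOD_SWSUP_MSTANDBY | HWMOD_FORCE_MSTANDBY))
                return IDLEMODE_FORCE;
        return IDLEMODE_SMART;
}

int main(void)
{
        unsigned long musb_flags = HWMOD_FORCE_MSTANDBY; /* as in omap3xxx usb_otg_hs */

        printf("enable: %d, idle: %d\n",
               midlemode_on_enable(musb_flags), midlemode_on_idle(musb_flags));
        return 0;
}

With the musb hwmod switched from HWMOD_SWSUP_MSTANDBY to HWMOD_FORCE_MSTANDBY, both paths resolve to force-standby, which is the point of the workaround described in the comment above.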
+ */ + .main_clk = "ocp2scp_usb_phy_phy_48m", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8f..f62b509ed08d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) clksrc_nr, clksrc_src) \ void __init omap##name##_gptimer_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ @@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ clksrc_nr, clksrc_src) \ void __init omap##name##_sync32k_timer_init(void) \ { \ + if (omap_clk_init) \ + omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ /* Enable the use of clocksource="gp_timer" kernel parameter */ \ diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index af41888acbd6..969b0ba7fa70 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -505,6 +505,7 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = { .pin = GPIO_ONE_WIRE, .is_open_drain = 0, .enable_external_pullup = w1_enable_external_pullup, + .ext_pullup_enable_pin = -EINVAL, }; struct platform_device raumfeld_w1_gpio_device = { diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e8..1e73f5fa8659 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h @@ -188,10 +188,8 @@ #if defined(CONFIG_CPU_S3C2416) #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) -#elif defined(CONFIG_CPU_S3C2443) -#define NR_IRQS (IRQ_S3C2443_AC97+1) #else -#define NR_IRQS (IRQ_S3C2440_AC97+1) +#define NR_IRQS (IRQ_S3C2443_AC97 + 1) #endif /* compatibility define. 
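A related thread running through the io.c and timer.c hunks above is that the SoC-specific clock init is no longer called from the *_init_early() functions; instead a pointer to it is recorded in omap_clk_init and the timer init code calls it later, if one was set. The user-space sketch below shows just that deferral shape; every name in it is illustrative rather than taken from the kernel.

#include <stdio.h>

/* illustrative stand-in for the per-SoC clock init hook */
static int (*clk_init_hook)(void);

static int fake_soc_clk_init(void)
{
        puts("clock tree initialised");
        return 0;
}

static void init_early(void)
{
        /* record which init to run later, but do not run it yet */
        clk_init_hook = fake_soc_clk_init;
}

static void timer_init(void)
{
        /* run the deferred init only if one was registered */
        if (clk_init_hook)
                clk_init_hook();
        /* ... timer setup can now rely on the clock tree ... */
}

int main(void)
{
        init_early();
        timer_init();
        return 0;
}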
*/ diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e73..d8ba9bee4c7e 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c @@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, base = (void *)0xfd000000; intc->reg_mask = base + 0xa4; - intc->reg_pending = base + 0x08; + intc->reg_pending = base + 0xa8; irq_num = 20; irq_start = S3C2410_IRQ(32); irq_offset = 4; diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index fcdf52dbcc49..f051f53e35b7 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c @@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; -static struct clk dummy_apb_pclk = { - .name = "apb_pclk", - .id = -1, -}; - static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, @@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = { static struct clk init_clocks_off[] = { { - .name = "dma", - .devname = "dma-pl330.0", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 3), - }, { - .name = "dma", - .devname = "dma-pl330.1", - .parent = &clk_hclk_psys.clk, - .enable = s5pv210_clk_ip0_ctrl, - .ctrlbit = (1 << 4), - }, { .name = "rot", .parent = &clk_hclk_dsys.clk, .enable = s5pv210_clk_ip0_ctrl, @@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = { .ctrlbit = (1<<19), }; +static struct clk clk_pdma0 = { + .name = "pdma0", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 3), +}; + +static struct clk clk_pdma1 = { + .name = "pdma1", + .parent = &clk_hclk_psys.clk, + .enable = s5pv210_clk_ip0_ctrl, + .ctrlbit = (1 << 4), +}; + static struct clk *clkset_uart_list[] = { [6] = &clk_mout_mpll.clk, [7] = &clk_mout_epll.clk, @@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = { &clk_hsmmc1, &clk_hsmmc2, &clk_hsmmc3, + &clk_pdma0, + &clk_pdma1, }; /* Clock initialisation code */ @@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = { CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), + CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), + CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), }; void __init s5pv210_register_clocks(void) @@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void) for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) s3c_disable_clocks(clk_cdev[ptr], 1); - s3c24xx_register_clock(&dummy_apb_pclk); s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 3a38f7b34b94..e373de44a8b6 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = { .mux_id = 0, .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_VSYNC_ACTIVE_LOW, - .bus_type = FIMC_BUS_TYPE_ITU_601, + .fimc_bus_type = FIMC_BUS_TYPE_ITU_601, .board_info = &noon010pc30_board_info, .i2c_bus_num = 0, .clk_frequency = 16000000UL, diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index cdcb799e802f..fec49ebc359a 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c @@ -32,6 +32,7 @@ #include <linux/smsc911x.h> #include <linux/spi/spi.h> #include <linux/spi/sh_hspi.h> +#include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include 
<linux/usb/otg.h> diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index f9d754f90c59..d2b3937c4014 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c @@ -14,7 +14,7 @@ #define pr_fmt(fmt) "SPEAr3xx: " fmt #include <linux/amba/pl022.h> -#include <linux/amba/pl08x.h> +#include <linux/amba/pl080.h> #include <linux/io.h> #include <plat/pl080.h> #include <mach/generic.h> diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c27102..7f2cb6c5e2c1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { #endif struct mmci_platform_data mop500_sdi0_data = { - .ios_handler = mop500_sdi0_ios_handler, .ocr_mask = MMC_VDD_29_30, .f_max = 50000000, .capabilities = MMC_CAP_4_BIT_DATA | diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4b..87d2d7b38ce9 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/clk.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/platform_data/i2c-nomadik.h> @@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev) regulator_put(prox_regulator); } +void mop500_snowball_ethernet_clock_enable(void) +{ + struct clk *clk; + + clk = clk_get_sys("fsmc", NULL); + if (!IS_ERR(clk)) + clk_prepare_enable(clk); +} + static struct cryp_platform_data u8500_cryp1_platform_data = { .mem_to_engine = { .dir = STEDMA40_MEM_TO_PERIPH, @@ -683,6 +693,8 @@ static void __init snowball_init_machine(void) mop500_audio_init(parent); mop500_uart_init(parent); + mop500_snowball_ethernet_clock_enable(); + /* This board has full regulator constraints */ regulator_has_full_constraints(); } diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90d..d38951be70df 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); void __init snowball_pinmaps_init(void); void __init hrefv60_pinmaps_init(void); void mop500_audio_init(struct device *parent); +void mop500_snowball_ethernet_clock_enable(void); int __init mop500_uib_init(void); void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe3..f1a581844372 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) /* Pinmaps must be in place before devices register */ if (of_machine_is_compatible("st-ericsson,mop500")) mop500_pinmaps_init(); - else if (of_machine_is_compatible("calaosystems,snowball-a9500")) + else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { snowball_pinmaps_init(); - else if (of_machine_is_compatible("st-ericsson,hrefv60+")) + mop500_snowball_ethernet_clock_enable(); + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) hrefv60_pinmaps_init(); else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} /* TODO: Add pinmaps for ccu9540 board. 
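mop500_snowball_ethernet_clock_enable() above follows the usual clock-consumer sequence: clk_get_sys() to look the clock up, IS_ERR() to reject an error pointer, then clk_prepare_enable(). The sketch below spells out the same pattern with error reporting and the matching release path added for completeness; it is a hedged illustration of the standard clk API, not a proposed change to the board file (which deliberately leaves the FSMC clock running).

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/printk.h>

/* illustrative helper: look up and enable a named system clock */
static struct clk *enable_sys_clock(const char *name)
{
        struct clk *clk;
        int ret;

        clk = clk_get_sys(name, NULL);
        if (IS_ERR(clk)) {
                pr_warn("no '%s' clock: %ld\n", name, PTR_ERR(clk));
                return clk;
        }

        ret = clk_prepare_enable(clk);
        if (ret) {
                pr_warn("failed to enable '%s': %d\n", name, ret);
                clk_put(clk);
                return ERR_PTR(ret);
        }

        /* callers that ever disable it again pair this with
         * clk_disable_unprepare() followed by clk_put() */
        return clk;
}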
*/ diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d17328730..4045c4931a30 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -43,7 +43,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_CACHE_V3 # although the core is v4t + select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY help @@ -469,9 +469,6 @@ config CPU_PABRT_V7 bool # The cache model -config CPU_CACHE_V3 - bool - config CPU_CACHE_V4 bool diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa2756f..9e51be96f635 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o -obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc3..48bc3c0a87ce 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) int lockregs; int i; - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: lockregs = 8; break; @@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) if (cache_id_part_number_from_dt) cache_id = cache_id_part_number_from_dt; else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) - & L2X0_CACHE_ID_PART_MASK; + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; /* Determine the number of ways */ - switch (cache_id) { + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: if (aux & (1 << 16)) ways = 16; @@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { .flush_all = l2x0_flush_all, .inv_all = l2x0_inv_all, .disable = l2x0_disable, - .set_debug = pl310_set_debug, }, }; @@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) data->save(); of_init = true; - l2x0_init(l2x0_base, aux_val, aux_mask); - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); + l2x0_init(l2x0_base, aux_val, aux_mask); return 0; } diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fadece8d3..000000000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null @@ -1,137 +0,0 @@ -/* - * linux/arch/arm/mm/cache-v3.S - * - * Copyright (C) 1997-2002 Russell king - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> -#include <asm/page.h> -#include "proc-macros.S" - -/* - * flush_icache_all() - * - * Unconditionally clean and invalidate the entire icache. 
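Looking back at the cache-l2x0.c hunks above: the L2 cache ID register carries implementer and revision fields alongside the part number, so switching on the raw register value only matched the part-number constants when those other bits happened to be zero; the fix applies L2X0_CACHE_ID_PART_MASK at each comparison instead. The small stand-alone program below shows why the mask matters, using a simplified, hypothetical register layout rather than the real L2X0 bit positions.

#include <stdio.h>

/* hypothetical ID layout: [7:4] part number, [3:0] RTL revision */
#define ID_PART_MASK  0x000000f0u
#define ID_PART_L310  0x00000030u

static int ways_raw(unsigned int cache_id)
{
        switch (cache_id) {                  /* pre-fix: revision bits break the match */
        case ID_PART_L310:
                return 16;
        default:
                return 8;
        }
}

static int ways_masked(unsigned int cache_id)
{
        switch (cache_id & ID_PART_MASK) {   /* post-fix: only the part field decides */
        case ID_PART_L310:
                return 16;
        default:
                return 8;
        }
}

int main(void)
{
        unsigned int id = ID_PART_L310 | 0x3;   /* an "L310" at RTL revision 3 */

        printf("raw switch: %d ways, masked switch: %d ways\n",
               ways_raw(id), ways_masked(id));
        return 0;
}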
- */ -ENTRY(v3_flush_icache_all) - mov pc, lr -ENDPROC(v3_flush_icache_all) - -/* - * flush_user_cache_all() - * - * Invalidate all cache entries in a particular address - * space. - * - * - mm - mm_struct describing address space - */ -ENTRY(v3_flush_user_cache_all) - /* FALLTHROUGH */ -/* - * flush_kern_cache_all() - * - * Clean and invalidate the entire cache. - */ -ENTRY(v3_flush_kern_cache_all) - /* FALLTHROUGH */ - -/* - * flush_user_cache_range(start, end, flags) - * - * Invalidate a range of cache entries in the specified - * address space. - * - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) - * - flags - vma_area_struct flags describing address space - */ -ENTRY(v3_flush_user_cache_range) - mov ip, #0 - mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * coherent_kern_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_kern_range) - /* FALLTHROUGH */ - -/* - * coherent_user_range(start, end) - * - * Ensure coherency between the Icache and the Dcache in the - * region described by start. If you have non-snooping - * Harvard caches, you need to implement this function. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_coherent_user_range) - mov r0, #0 - mov pc, lr - -/* - * flush_kern_dcache_area(void *page, size_t size) - * - * Ensure no D cache aliasing occurs, either with itself or - * the I cache - * - * - addr - kernel address - * - size - region size - */ -ENTRY(v3_flush_kern_dcache_area) - /* FALLTHROUGH */ - -/* - * dma_flush_range(start, end) - * - * Clean and invalidate the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_flush_range) - mov r0, #0 - mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - mov pc, lr - -/* - * dma_unmap_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_unmap_area) - teq r2, #DMA_TO_DEVICE - bne v3_dma_flush_range - /* FALLTHROUGH */ - -/* - * dma_map_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(v3_dma_map_area) - mov pc, lr -ENDPROC(v3_dma_unmap_area) -ENDPROC(v3_dma_map_area) - - .globl v3_flush_kern_cache_louis - .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all - - __INITDATA - - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) - define_cache_functions v3 diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be677..a7ba68f59f0c 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all) ENTRY(v4_flush_user_cache_range) #ifdef CONFIG_CPU_CP15 mov ip, #0 - mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache + mcr p15, 0, ip, c7, c7, 0 @ flush ID cache mov pc, lr #else /* FALLTHROUGH */ diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -static DEFINE_PER_CPU(atomic64_t, active_asids); +DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; @@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { local_flush_bp_all(); local_flush_tlb_all(); + dummy_flush_tlb_a15_erratum(); } atomic64_set(&per_cpu(active_asids, cpu), asid); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c7e3759f16d3..e9db6b4bf65a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -342,6 +342,7 @@ static int __init atomic_pool_init(void) { struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); + gfp_t gfp = GFP_KERNEL | GFP_DMA; unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; @@ -361,8 +362,8 @@ static int __init atomic_pool_init(void) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, atomic_pool_init); else - ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, - &page, atomic_pool_init); + ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, + atomic_pool_init); if (ptr) { int i; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..a84ff763ac39 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -34,6 +34,7 @@ #include <asm/mach/pci.h> #include "mm.h" +#include "tcm.h" /* * empty_zero_page is a special page that is used for @@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys, - const struct mem_type *type) +static void __init map_init_section(pmd_t *pmd, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { - 
pmd_t *pmd = pmd_offset(pud, addr); - +#ifndef CONFIG_ARM_LPAE /* - * Try a section mapping - end, addr and phys must all be aligned - * to a section boundary. Note that PMDs refer to the individual - * L1 entries, whereas PGDs refer to a group of L1 entries making - * up one logical pointer to an L2 table. + * In classic MMU format, puds and pmds are folded in to + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a + * group of L1 entries making up one logical pointer to + * an L2 table (2MB), where as PMDs refer to the individual + * L1 entries (1MB). Hence increment to get the correct + * offset for odd 1MB sections. + * (See arch/arm/include/asm/pgtable-2level.h) */ - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { - pmd_t *p = pmd; - -#ifndef CONFIG_ARM_LPAE - if (addr & SECTION_SIZE) - pmd++; + if (addr & SECTION_SIZE) + pmd++; #endif + do { + *pmd = __pmd(phys | type->prot_sect); + phys += SECTION_SIZE; + } while (pmd++, addr += SECTION_SIZE, addr != end); - do { - *pmd = __pmd(phys | type->prot_sect); - phys += SECTION_SIZE; - } while (pmd++, addr += SECTION_SIZE, addr != end); + flush_pmd_entry(pmd); +} - flush_pmd_entry(p); - } else { +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, + unsigned long end, phys_addr_t phys, + const struct mem_type *type) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { /* - * No need to loop; pte's aren't interested in the - * individual L1 entries. + * With LPAE, we must loop over to map + * all the pmds for the given range. */ - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); - } + next = pmd_addr_end(addr, end); + + /* + * Try a section mapping - addr, next and phys must all be + * aligned to a section boundary. + */ + if (type->prot_sect && + ((addr | next | phys) & ~SECTION_MASK) == 0) { + map_init_section(pmd, addr, next, phys, type); + } else { + alloc_init_pte(pmd, addr, next, + __phys_to_pfn(phys), type); + } + + phys += next - addr; + + } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, @@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, do { next = pud_addr_end(addr, end); - alloc_init_section(pud, addr, next, phys, type); + alloc_init_pmd(pud, addr, next, phys, type); phys += next - addr; } while (pud++, addr = next, addr != end); } @@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc) dma_contiguous_remap(); devicemaps_init(mdesc); kmap_init(); + tcm_init(); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f20..fde2d2a794cf 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -77,24 +77,27 @@ __arm740_setup: mcr p15, 0, r0, c6, c0 @ set area 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit mcr p15, 0, r0, c6, c1 @ set area 1, RAM ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH 
(must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 + ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) + cmp r3, #0 + moveq r0, #0 + beq 2f + mov r4, #10 @ 11 is the minimum (4KB) +1: add r4, r4, #1 @ area size *= 2 + movs r3, r3, lsr #1 bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value + orr r0, r0, r4, lsl #1 @ the area register value orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH +2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH mov r0, #0x06 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable @@ -137,13 +140,14 @@ __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 + .long 0 b __arm740_setup .long cpu_arch_name .long cpu_elf_name - .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT .long cpu_arm740_name .long arm740_processor_functions .long 0 .long 0 - .long v3_cache_fns @ cache model + .long v4_cache_fns @ cache model .size __arm740_proc_info, . - __arm740_proc_info diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5e..2556cf1c2da1 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e2972..344c8a548cc0 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d6..0b60dd3d742a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd84..d92dfd081429 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b4d6d4..054b491ff764 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -17,7 +17,9 @@ #ifndef MULTI_CPU EXPORT_SYMBOL(cpu_dcache_clean_area); +#ifdef CONFIG_MMU EXPORT_SYMBOL(cpu_set_pte_ext); +#endif #else EXPORT_SYMBOL(processor); #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8de9325..5c07ee4fe3eb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: 
taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -420,7 +420,7 @@ __v7_pj4b_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info /* @@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV + __v7_proc __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* + * Qualcomm Inc. Krait processors. + */ + .type __krait_proc_info, #object +__krait_proc_info: + .long 0x510f0400 @ Required ID value + .long 0xff0ffc00 @ Mask for ID + /* + * Some Krait processors don't indicate support for SDIV and UDIV + * instructions in the ARM instruction set, even though they actually + * do support them. + */ + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV + .size __krait_proc_info, . - __krait_proc_info + + /* * Match any ARMv7 processor core. */ .type __v7_proc_info, #object diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f35..e8efd83b6f25 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa18..e766f889bfd6 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h index 8015ad434a40..8015ad434a40 100644 --- a/arch/arm/kernel/tcm.h +++ b/arch/arm/mm/tcm.h diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6828ef6ce80e..a0bd8a755bdf 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -576,7 +576,7 @@ load_ind: /* x = ((*(frame + k)) & 0xf) << 2; */ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; /* the interpreter should deal with the negative K */ - if (k < 0) + if ((int)k < 0) return -1; /* offset in r1: we might have to take the slow path */ emit_mov_i(r_off, k, ctx); diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c index febe3862873c..807ac8e5cbc0 100644 --- a/arch/arm/plat-orion/addr-map.c +++ b/arch/arm/plat-orion/addr-map.c @@ -157,9 +157,12 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg, u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i)); /* - * Chip select enabled? + * We only take care of entries for which the chip + * select is enabled, and that don't have high base + * address bits set (devices can only access the first + * 32 bits of the memory). 
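The one-character bpf_jit_32.c change above is a signedness fix: the BPF constant k is a 32-bit unsigned field, so `k < 0` can never be true and the fallback path for negative offsets was unreachable; casting to int first restores the intended test. A tiny stand-alone demonstration (assuming the usual two's-complement target):

#include <stdio.h>

int main(void)
{
        unsigned int k = (unsigned int)-8;   /* a "negative" offset stored in an unsigned field */

        /* always 0: an unsigned value is never less than zero */
        printf("k < 0      -> %d\n", k < 0);

        /* 1: reinterpret the same bits as a signed offset before comparing */
        printf("(int)k < 0 -> %d\n", (int)k < 0);

        return 0;
}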
*/ - if (size & 1) { + if ((size & 1) && !(base & 0xF)) { struct mbus_dram_window *w; w = &orion_mbus_dram_info.cs[cs++]; diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig index 739d016eb273..8a08c31b5e20 100644 --- a/arch/arm/plat-spear/Kconfig +++ b/arch/arm/plat-spear/Kconfig @@ -10,7 +10,7 @@ choice config ARCH_SPEAR13XX bool "ST SPEAr13xx with Device Tree" - select ARCH_HAVE_CPUFREQ + select ARCH_HAS_CPUFREQ select ARM_GIC select CPU_V7 select GPIO_SPEAR_SPICS diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd70a68387eb..9b6d19f74078 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -9,7 +9,6 @@ config ARM64 select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS - select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 51493430f142..1a6bfe954d49 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -6,17 +6,6 @@ config FRAME_POINTER bool default y -config DEBUG_ERRORS - bool "Verbose kernel error messages" - depends on DEBUG_KERNEL - help - This option controls verbose debugging information which can be - printed when the kernel detects an internal error. This debugging - information is useful to kernel hackers when tracking down problems, - but mostly meaningless to other people. It's safe to say Y unless - you are concerned with the code size or don't want to see these - messages. - config DEBUG_STACK_USAGE bool "Enable stack utilization instrumentation" depends on DEBUG_KERNEL diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 9212c7880da7..09bef29f3a09 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO=y # CONFIG_FTRACE is not set CONFIG_ATOMIC64_SELFTEST=y -CONFIG_DEBUG_ERRORS=y diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h index bde960720892..42e04c877428 100644 --- a/arch/arm64/include/asm/ucontext.h +++ b/arch/arm64/include/asm/ucontext.h @@ -22,7 +22,7 @@ struct ucontext { stack_t uc_stack; sigset_t uc_sigmask; /* glibc uses a 1024-bit sigset_t */ - __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; /* last for future expansion */ struct sigcontext uc_mcontext; }; diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index cef3925eaf60..aa3e948f7885 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(__clear_user); /* bitops */ +#ifdef CONFIG_SMP EXPORT_SYMBOL(__atomic_hash); +#endif /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 7f4f3673f2bc..e393174fe859 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct compat_rt_sigframe __user *frame; - compat_stack_t stack; int err = 0; frame = compat_get_sigframe(ka, regs, sizeof(*frame)); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 224b44ab534e..70b8cd4021c4 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) 
{ unsigned long size, mask; - bool page64k = IS_ENABLED(ARM64_64K_PAGES); + bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); pgd_t *pgd; pud_t *pud; pmd_t *pmd; diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 9b89257b2cfd..c1a868d398bd 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -7,7 +7,7 @@ config AVR32 select HAVE_OPROFILE select HAVE_KPROBES select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_ATOMIC64 select HARDIRQS_SW_RESEND diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a9f176..fc6483f83ccc 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32) #define readw_be __raw_readw #define readl_be __raw_readl +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel + #define writeb_be __raw_writeb #define writew_be __raw_writew #define writel_be __raw_writel diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 600494c70e96..c3f2e0bc644a 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -33,7 +33,7 @@ config BLACKFIN select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select HAVE_GENERIC_HARDIRQS select GENERIC_ATOMIC64 diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c3..2c71d5634ec2 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) /* set interrupt enabled status */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); + asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); } /* unconditionally enable interrupts */ diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index bb0ac66cf533..06dd026533e3 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -43,7 +43,7 @@ config CRIS select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_IOMAP diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 12369b194c7b..2ce731f9aa4d 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -6,7 +6,7 @@ config FRV select HAVE_PERF_EVENTS select HAVE_UID16 select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_SHOW select HAVE_DEBUG_BUGVERBOSE select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index ae8551eb3736..79250de1b12a 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -5,7 +5,7 @@ config H8300 select HAVE_GENERIC_HARDIRQS select GENERIC_ATOMIC64 select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 33f3fdc0b214..e7e55a00f94f 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -26,7 +26,7 @@ config IA64 select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -187,7 +187,7 @@ config IA64_DIG config IA64_DIG_VTD bool "DIG+Intel+IOMMU" - select DMAR + select INTEL_IOMMU select 
PCI_MSI config IA64_HP_ZX1 diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index d2bf1fd5e44f..76acbcd5c060 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; unsigned long prev; __asm__ __volatile__( " mf;; \n" - " mov %0=r0 \n" " mov ar.ccv=%4;; \n" "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. \n" "[2:]" - : "=r" (r8), "=r" (prev) + : "+r" (r8), "=&r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 43f96ab18fa0..8c7096168716 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; extern int cpe_vector; extern int ia64_cpe_irq; extern void ia64_mca_init(void); +extern void ia64_mca_irq_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 2e27ef175652..2db0a6c6daa5 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr); extern void map_cpu_to_node(int cpu, int nid); extern void unmap_cpu_from_node(int cpu, int nid); - +extern void numa_clear_node(int cpu); #else /* !CONFIG_NUMA */ #define map_cpu_to_node(cpu, nid) do{}while(0) #define unmap_cpu_from_node(cpu, nid) do{}while(0) - #define paddr_to_nid(addr) 0 - +#define numa_clear_node(cpu) do { } while (0) #endif /* CONFIG_NUMA */ #endif /* _ASM_IA64_NUMA_H */ diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index c4cd45d97749..abc6dee3799c 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -90,53 +90,6 @@ ENTRY(fsys_getpid) FSYS_RETURN END(fsys_getpid) -ENTRY(fsys_getppid) - .prologue - .altrp b6 - .body - add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16 - ;; - ld8 r17=[r17] // r17 = current->group_leader - add r9=TI_FLAGS+IA64_TASK_SIZE,r16 - ;; - - ld4 r9=[r9] - add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = ¤t->group_leader->real_parent - ;; - and r9=TIF_ALLWORK_MASK,r9 - -1: ld8 r18=[r17] // r18 = current->group_leader->real_parent - ;; - cmp.ne p8,p0=0,r9 - add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = ¤t->group_leader->real_parent->tgid - ;; - - /* - * The .acq is needed to ensure that the read of tgid has returned its data before - * we re-check "real_parent". - */ - ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid -#ifdef CONFIG_SMP - /* - * Re-read current->group_leader->real_parent. - */ - ld8 r19=[r17] // r19 = current->group_leader->real_parent -(p8) br.spnt.many fsys_fallback_syscall - ;; - cmp.ne p6,p0=r18,r19 // did real_parent change? - mov r19=0 // i must not leak kernel bits... -(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check - ;; - mov r17=0 // i must not leak kernel bits... - mov r18=0 // i must not leak kernel bits... -#else - mov r17=0 // i must not leak kernel bits... - mov r18=0 // i must not leak kernel bits... - mov r19=0 // i must not leak kernel bits... 
-#endif - FSYS_RETURN -END(fsys_getppid) - ENTRY(fsys_set_tid_address) .prologue .altrp b6 @@ -614,7 +567,7 @@ paravirt_fsyscall_table: data8 0 // chown data8 0 // lseek // 1040 data8 fsys_getpid // getpid - data8 fsys_getppid // getppid + data8 0 // getppid data8 0 // mount data8 0 // umount data8 0 // setuid // 1045 diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index ee33c3aaa2fc..19f107be734e 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -76,7 +76,7 @@ * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ * * Note: The term "IRQ" is loosely used everywhere in Linux kernel to - * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ + * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ * (isa_irq) is the only exception in this source code. */ @@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver) return 0; } +static int +iosapic_delete_rte(unsigned int irq, unsigned int gsi) +{ + struct iosapic_rte_info *rte, *temp; + + list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes, + rte_list) { + if (rte->iosapic->gsi_base + rte->rte_index == gsi) { + if (rte->refcnt) + return -EBUSY; + + list_del(&rte->rte_list); + kfree(rte); + return 0; + } + } + + return -EINVAL; +} + int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) { int num_rte, err, index; @@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) int iosapic_remove(unsigned int gsi_base) { - int index, err = 0; + int i, irq, index, err = 0; unsigned long flags; spin_lock_irqsave(&iosapic_lock, flags); @@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base) goto out; } + for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) { + irq = __gsi_to_irq(i); + if (irq < 0) + continue; + + err = iosapic_delete_rte(irq, i); + if (err) + goto out; + } + iounmap(iosapic_lists[index].addr); iosapic_free(index); out: diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index ad69606613eb..f2c418281130 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,6 +23,8 @@ #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <asm/mca.h> + /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. @@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask) #endif /* CONFIG_SMP */ +int __init arch_early_irq_init(void) +{ + ia64_mca_irq_init(); + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 65bf9cd39044..d7396dbb07bb 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2074,22 +2074,16 @@ ia64_mca_init(void) printk(KERN_INFO "MCA related initialization done\n"); } + /* - * ia64_mca_late_init - * - * Opportunity to setup things that require initialization later - * than ia64_mca_init. Setup a timer to poll for CPEs if the - * platform doesn't support an interrupt driven mechanism. - * - * Inputs : None - * Outputs : Status + * These pieces cannot be done in ia64_mca_init() because it is called before + * early_irq_init() which would wipe out our percpu irq registrations. But we + * cannot leave them until ia64_mca_late_init() because by then all the other + * processors have been brought online and have set their own CMC vectors to + * point at a non-existant action. Called from arch_early_irq_init(). 
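The new iosapic_delete_rte() above iterates with list_for_each_entry_safe() because it may unlink and kfree() the entry it is currently looking at; the _safe variant stashes the next node before the loop body runs so the walk survives the free. The user-space sketch below shows the same discipline on a hand-rolled singly linked list; all of its names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int gsi;
        struct node *next;
};

/* remove and free every node whose gsi matches, while walking the list */
static void delete_gsi(struct node **head, int gsi)
{
        struct node **prevp = head;
        struct node *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;            /* cache the successor first ... */
                if (cur->gsi == gsi) {
                        *prevp = next;       /* ... unlink ... */
                        free(cur);           /* ... then freeing cur is safe */
                } else {
                        prevp = &cur->next;
                }
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 5; i++) {
                struct node *n = malloc(sizeof(*n));
                n->gsi = i % 2;              /* list ends up as 0,1,0,1,0 */
                n->next = head;
                head = n;
        }

        delete_gsi(&head, 1);
        for (struct node *n = head; n; n = n->next)
                printf("%d ", n->gsi);       /* prints: 0 0 0 */
        printf("\n");
        return 0;
}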
*/ -static int __init -ia64_mca_late_init(void) +void __init ia64_mca_irq_init(void) { - if (!mca_init) - return 0; - /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2108,6 +2102,23 @@ ia64_mca_late_init(void) /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif +} + +/* + * ia64_mca_late_init + * + * Opportunity to setup things that require initialization later + * than ia64_mca_init. Setup a timer to poll for CPEs if the + * platform doesn't support an interrupt driven mechanism. + * + * Inputs : None + * Outputs : Status + */ +static int __init +ia64_mca_late_init(void) +{ + if (!mca_init) + return 0; register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 9392e021c93b..94f8bf777afa 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -349,7 +349,7 @@ init_record_index_pools(void) /* - 3 - */ slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; - slidx_pool.buffer = (slidx_list_t *) + slidx_pool.buffer = kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); return slidx_pool.buffer ? 0 : -ENOMEM; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60a..79521d5499f9 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. The procfs code - * does not do recursion of deletion - * - * Notes: - * - +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi static void __cpuinit create_palinfo_proc_entries(unsigned int cpu) { -# define CPUSTR "cpu%d" - pal_func_cpu_u_t f; - struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[sizeof(CPUSTR)]; - - - /* - * we keep track of created entries in a depth-first order for - * cleanup purposes. Each entry is stored into palinfo_proc_entries - */ - sprintf(cpustr,CPUSTR, cpu); + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); + if (!cpu_dir) + return; f.req_cpu = cpu; - /* - * Compute the location to store per cpu entries - * We dont store the top level entry in this list, but - * remove it finally after removing all cpu entries. 
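The reworked palinfo proc helpers above size the per-cpu directory name by hand, char cpustr[3+4+1] for "cpu", at most four digits, and the terminating NUL, and then sprintf() into it. The fragment below is the same calculation written defensively with snprintf, so the bound comes from the buffer itself; it is only an illustration of the sizing, not a suggested further change.

#include <stdio.h>

int main(void)
{
        unsigned int cpu = 4095;          /* the maximum the comment in that code allows for */
        char cpustr[3 + 4 + 1];           /* "cpu" + up to 4 digits + '\0' */

        /* snprintf never writes past sizeof(cpustr) and always NUL-terminates */
        snprintf(cpustr, sizeof(cpustr), "cpu%u", cpu);
        puts(cpustr);                     /* prints: cpu4095 */
        return 0;
}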
- */ - pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; - *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - *pdir = create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); - pdir++; + create_proc_read_entry( + palinfo_entries[j].name, 0, cpu_dir, + palinfo_read_entry, (void *)f.value); } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - int j; - struct proc_dir_entry *cpu_dir, **pdir; - - pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; - cpu_dir = *pdir; - *pdir++=NULL; - for (j=0; j < (NR_PALINFO_ENTRIES); j++) { - if ((*pdir)) { - remove_proc_entry ((*pdir)->name, cpu_dir); - *pdir ++= NULL; - } - } - - if (cpu_dir) { - remove_proc_entry(cpu_dir->name, palinfo_dir); - } + char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ + sprintf(cpustr, "cpu%d", hcpu); + remove_proc_subtree(cpustr, palinfo_dir); } static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +1019,8 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); + if (!palinfo_dir) + return -ENOMEM; /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1073,22 +1036,8 @@ palinfo_init(void) static void __exit palinfo_exit(void) { - int i = 0; - - /* remove all nodes: depth first pass. Could optimize this */ - for_each_online_cpu(i) { - remove_palinfo_proc_entries(i); - } - - /* - * Remove the top level entry finally - */ - remove_proc_entry(palinfo_dir->name, NULL); - - /* - * Unregister from cpu notifier callbacks - */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); + remove_proc_subtree("pal", NULL); } module_init(palinfo_init); diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index e34f565f595a..6f7dc8b7b35c 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -291,7 +291,6 @@ cpu_idle (void) } if (!need_resched()) { - void (*idle)(void); #ifdef CONFIG_SMP min_xtp(); #endif @@ -299,9 +298,7 @@ cpu_idle (void) if (mark_idle) (*mark_idle)(1); - if (!idle) - idle = default_idle; - (*idle)(); + default_idle(); if (mark_idle) (*mark_idle)(0); #ifdef CONFIG_SMP diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 4332f7ee5203..a7869f8f49a6 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) "srlz.d;;" "ssm psr.i;;" "srlz.d;;" - : "=r"(ret) : "r"(iha), "r"(pte):"memory"); + : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); return ret; } diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 3dccdd8eb275..43964cde6214 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -16,7 +16,7 @@ #include <asm/meminit.h> static inline void __iomem * -__ioremap (unsigned long phys_addr) +__ioremap_uc(unsigned long phys_addr) { return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } @@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr) void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size) { - return __ioremap(phys_addr); + u64 attr; + attr = kern_mem_attribute(phys_addr, size); + if (attr & EFI_MEMORY_WB) + return (void __iomem *) phys_to_virt(phys_addr); + return __ioremap_uc(phys_addr); } void __iomem * @@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size) if (attr & EFI_MEMORY_WB) return (void __iomem *) phys_to_virt(phys_addr); else if (attr & EFI_MEMORY_UC) - return __ioremap(phys_addr); + return 
__ioremap_uc(phys_addr); /* * Some chipsets don't support UC access to memory. If @@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size) return (void __iomem *) (offset + (char __iomem *)addr); } - return __ioremap(phys_addr); + return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap); @@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) return NULL; - return __ioremap(phys_addr); + return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap_nocache); diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 3efea7d0a351..def782e31aac 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -73,6 +73,11 @@ int __meminit __early_pfn_to_nid(unsigned long pfn) return -1; } +void __cpuinit numa_clear_node(int cpu) +{ + unmap_cpu_from_node(cpu, NUMA_NO_NODE); +} + #ifdef CONFIG_MEMORY_HOTPLUG /* * SRAT information is stored in node_memblk[], then we can use SRAT diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 14c1711238c0..e35f6485c1fd 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -490,11 +490,14 @@ static int __init tiocx_init(void) { cnodeid_t cnodeid; int found_tiocx_device = 0; + int err; if (!ia64_platform_is("sn2")) return 0; - bus_register(&tiocx_bus_type); + err = bus_register(&tiocx_bus_type); + if (err) + return err; for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) { nasid_t nasid; diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 92623818a1fe..bcd17b206571 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -10,7 +10,7 @@ config M32R select ARCH_WANT_IPC_PARSE_VERSION select HAVE_DEBUG_BUGVERBOSE select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h index da4518f82d6d..98470fe483b6 100644 --- a/arch/m32r/include/uapi/asm/stat.h +++ b/arch/m32r/include/uapi/asm/stat.h @@ -63,10 +63,10 @@ struct stat64 { long long st_size; unsigned long st_blksize; -#if defined(__BIG_ENDIAN) +#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) unsigned long __pad4; /* future possible st_blocks high bits */ unsigned long st_blocks; /* Number 512-byte blocks allocated. */ -#elif defined(__LITTLE_ENDIAN) +#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) unsigned long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long __pad4; /* future possible st_blocks high bits */ #else diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 0e708c78e01c..6de813370b8c 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -8,7 +8,7 @@ config M68K select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 select HAVE_UID16 - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 7cdf6b010381..7240584d3439 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -310,7 +310,6 @@ config COBRA5282 config SOM5282EM bool "EMAC.Inc SOM5282EM board support" depends on M528x - select EMAC_INC help Support for the EMAC.Inc SOM5282EM module. 
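A note on the byte-order guard in the m32r uapi/asm/stat.h hunk above: the rewritten #if works whether or not the toolchain predefines __BYTE_ORDER. When __BYTE_ORDER is defined, the conditional reduces to comparing it against __BIG_ENDIAN (or __LITTLE_ENDIAN); when it is not, the expression falls back to testing whether the bare __BIG_ENDIAN / __LITTLE_ENDIAN macro is defined, which is what the old code relied on. The sketch below is a minimal user-space illustration of the same pattern, not part of the patch; it assumes glibc-style <endian.h> macro names, which may differ on other C libraries.

	#include <stdio.h>
	#include <endian.h>	/* glibc defines __BYTE_ORDER, __BIG_ENDIAN, __LITTLE_ENDIAN */

	int main(void)
	{
	#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
		/* Chosen when __BYTE_ORDER is defined and equals __BIG_ENDIAN,
		 * or when only a bare __BIG_ENDIAN macro exists; corresponds to
		 * the stat64 layout with the pad word before st_blocks. */
		printf("big-endian struct layout selected\n");
	#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
		/* st_blocks comes first, pad word second. */
		printf("little-endian struct layout selected\n");
	#else
	#error "could not determine byte order"
	#endif
		return 0;
	}
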
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h index a337e56d09bf..4ebf098b8a1f 100644 --- a/arch/m68k/include/asm/MC68328.h +++ b/arch/m68k/include/asm/MC68328.h @@ -293,7 +293,7 @@ /* * Here go the bitmasks themselves */ -#define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */ +#define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */ #define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */ #define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */ #define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */ @@ -327,7 +327,7 @@ #define IWR_ADDR 0xfffff308 #define IWR LONG_REF(IWR_ADDR) -#define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -357,7 +357,7 @@ #define ISR_ADDR 0xfffff30c #define ISR LONG_REF(ISR_ADDR) -#define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -391,7 +391,7 @@ #define IPR_ADDR 0xfffff310 #define IPR LONG_REF(IPR_ADDR) -#define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ +#define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ #define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ #define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ #define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ @@ -757,7 +757,7 @@ /* 'EZ328-compatible definitions */ #define TCN_ADDR TCN1_ADDR -#define TCN TCN +#define TCN TCN1 /* * Timer Unit 1 and 2 Status Registers diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc51fdb..8cc83431805b 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h @@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio) return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); } +static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ + int err; + + err = gpio_request(gpio, label); + if (err) + return err; + + if (flags & GPIOF_DIR_IN) + err = gpio_direction_input(gpio); + else + err = gpio_direction_output(gpio, + (flags & GPIOF_INIT_HIGH) ? 
1 : 0); + + if (err) + gpio_free(gpio); + + return err; +} + #endif diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 71fb29938dba..911ba472e6c4 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -57,6 +57,9 @@ void (*mach_reset)(void); void (*mach_halt)(void); void (*mach_power_off)(void); +#ifdef CONFIG_M68000 +#define CPU_NAME "MC68000" +#endif #ifdef CONFIG_M68328 #define CPU_NAME "MC68328" #endif diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index afd8106fd83b..519aad8fa812 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -188,7 +188,7 @@ void __init mem_init(void) } } -#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) +#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) /* insert pointer tables allocated so far into the tablelist */ init_pointer_table((unsigned long)kernel_pg_dir); for (i = 0; i < PTRS_PER_PGD; i++) { diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c index 83b7dad7a84e..b03a9d271837 100644 --- a/arch/m68k/platform/coldfire/m528x.c +++ b/arch/m68k/platform/coldfire/m528x.c @@ -69,7 +69,7 @@ static void __init m528x_uarts_init(void) u8 port; /* make sure PUAPAR is set for UART0 and UART1 */ - port = readb(MCF5282_GPIO_PUAPAR); + port = readb(MCFGPIO_PUAPAR); port |= 0x03 | (0x03 << 2); writeb(port, MCFGPIO_PUAPAR); } diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 7843d11156e6..1323fa2530eb 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -19,7 +19,7 @@ config MICROBLAZE select HAVE_DEBUG_KMEMLEAK select IRQ_DOMAIN select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ae9c716c46bb..51244bf97271 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -18,7 +18,7 @@ config MIPS select HAVE_KRETPROBES select HAVE_DEBUG_KMEMLEAK select ARCH_BINFMT_ELF_RANDOMIZE_PIE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select RTC_LIB if !MACH_LOONGSON select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -38,7 +38,7 @@ config MIPS select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select CLONE_BACKWARDS @@ -657,7 +657,7 @@ config SNI_RM bool "SNI RM200/300/400" select FW_ARC if CPU_LITTLE_ENDIAN select FW_ARC32 if CPU_LITTLE_ENDIAN - select SNIPROM if CPU_BIG_ENDIAN + select FW_SNIPROM if CPU_BIG_ENDIAN select ARCH_MAY_HAVE_PC_FDC select BOOT_ELF32 select CEVT_R4K @@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION config FW_ARC32 bool -config SNIPROM +config FW_SNIPROM bool config BOOT_ELF32 @@ -1493,7 +1493,6 @@ config CPU_XLP select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM - select CPU_HAS_LLSC select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -745,10 +745,7 @@ void __init board_prom_init(void) strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); - 
if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { - printk(KERN_ERR PFX "invalid nvram checksum\n"); - return; - } + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); board_name = bcm63xx_nvram_get_name(); /* find board by name */ diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -38,7 +38,7 @@ struct bcm963xx_nvram { static struct bcm963xx_nvram nvram; static int mac_addr_used; -int __init bcm63xx_nvram_init(void *addr) +void __init bcm63xx_nvram_init(void *addr) { unsigned int check_len; u32 crc, expected_crc; @@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) crc = crc32_le(~0, (u8 *)&nvram, check_len); if (crc != expected_crc) - return -EINVAL; - - return 0; + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, crc); } u8 *bcm63xx_nvram_get_name(void) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) return board_register_devices(); } -device_initcall(bcm63xx_register_devices); +arch_initcall(bcm63xx_register_devices); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) static void octeon_generic_shutdown(void) { - int cpu, i; + int i; +#ifdef CONFIG_SMP + int cpu; +#endif struct cvmx_bootmem_desc *bootmem_desc; void *named_block_array_ptr; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -9,10 +9,8 @@ * * Initialized the local nvram copy from the target address and checks * its checksum. - * - * Returns 0 on success. */ -int __init bcm63xx_nvram_init(void *nvram); +void bcm63xx_nvram_init(void *nvram); /** * bcm63xx_nvram_get_name() - returns the board name according to nvram diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,11 +28,7 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ -#ifdef CONFIG_CPU_HAS_LLSC #define cpu_has_llsc 1 -#else -#define cpu_has_llsc 0 -#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? 
*/ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1166,7 +1166,10 @@ do { \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " rddsp %0, %x1 \n" \ + " .set pop \n" \ : "=r" (__dspctl) \ : "i" (mask)); \ __dspctl; \ @@ -1175,30 +1178,198 @@ do { \ #define wrdsp(val, mask) \ do { \ __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ " wrdsp %0, %x1 \n" \ + " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ } while (0) -#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) -#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) -#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) -#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) - -#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) -#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) -#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) -#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) - -#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) -#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) -#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) -#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) - -#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) -#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) -#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) -#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() \ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + 
+#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) #else diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 99fc547af9d3..eab99e536b5c 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -31,7 +31,7 @@ #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -21,6 +21,6 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> -#define __ARCH_HAS_ODD_SIGACTION +#define __ARCH_HAS_IRIX_SIGACTION #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. + * + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever + * supported its use and no libc was using it, so the entire sa-restorer + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 + * retaining only the SA_RESTORER definition as a reminder to avoid + * accidental reuse of the mask bit. */ #define SA_ONSTACK 0x08000000 #define SA_RESETHAND 0x80000000 @@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* Only for o32 */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o # -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe -# to enable DSP assembler support here even if the MIPS Release 2 CPU we -# are targetting does not support DSP because all code-paths making use of -# it properly check that the running CPU *actually does* support these -# instructions. +# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not +# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches +# here because the compiler may use DSP ASE instructions (such as lwx) in +# code paths where we cannot check that the CPU we are running on supports it. 
+# Proper abstraction using HAVE_AS_DSP and macros is done in +# arch/mips/include/asm/mipsregs.h. # ifeq ($(CONFIG_CPU_MIPSR2), y) CFLAGS_DSP = -DHAVE_AS_DSP -# -# Check if assembler supports DSP ASE -# -ifeq ($(call cc-option-yn,-mdsp), y) -CFLAGS_DSP += -mdsp -endif - -# -# Check if assembler supports DSP ASE Rev2 -# -ifeq ($(call cc-option-yn,-mdspr2), y) -CFLAGS_DSP += -mdspr2 -endif - CFLAGS_signal.o = $(CFLAGS_DSP) CFLAGS_signal32.o = $(CFLAGS_DSP) CFLAGS_process.o = $(CFLAGS_DSP) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..5fe66a0c3224 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 48; break; case PRID_IMP_VR41XX: + set_isa(c, MIPS_CPU_ISA_III); + c->options = R4K_OPTS; + c->tlbsize = 32; switch (c->processor_id & 0xf0) { case PRID_REV_VR4111: c->cputype = CPU_VR4111; @@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC VR4131"; } else { c->cputype = CPU_VR4133; + c->options |= MIPS_CPU_LLSC; __cpu_name[cpu] = "NEC VR4133"; } break; @@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "NEC Vr41xx"; break; } - set_isa(c, MIPS_CPU_ISA_III); - c->options = R4K_OPTS; - c->tlbsize = 32; break; case PRID_IMP_R4300: c->cputype = CPU_R4300; @@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; } diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, err = compat_sys_shmctl(first, second, compat_ptr(ptr)); break; default: - err = -EINVAL; + err = -ENOSYS; break; } diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,10 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else - PTR_ADDIU sp, PT_SIZE #endif -.endm + PTR_ADDIU sp, PT_SIZE + .endm .macro RETURN_BACK jr ra @@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 +#ifdef CONFIG_32BIT + addiu sp,sp,8 +#else + nop +#endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mips_r) { seq_printf(m, "isa\t\t\t:"); if (cpu_has_mips_1) - seq_printf(m, "%s", "mips1"); + seq_printf(m, "%s", " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1571,7 
+1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) status_set |= ST0_XX; if (cpu_has_dsp) status_set |= ST0_MX; diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - unsigned long res; + int res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a); + res = (mask & *a) != 0; *a ^= mask; raw_local_irq_restore(flags); return res; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -270,7 +270,7 @@ LEAF(csum_partial) #endif /* odd buffer alignment? 
*/ -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, t7 #else @@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) addu sum, v1 #endif -#ifdef CPU_MIPSR2 +#ifdef CONFIG_CPU_MIPSR2 wsbh v1, sum movn sum, v1, odd #else diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) return; default: - if (c->isa_level == MIPS_CPU_ISA_M32R1 || - c->isa_level == MIPS_CPU_ISA_M32R2 || - c->isa_level == MIPS_CPU_ISA_M64R1 || - c->isa_level == MIPS_CPU_ISA_M64R2) { + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) c->scache.flags |= MIPS_CACHE_NOT_PRESENT; /* Ignore anything but MIPSxx processors */ - if (c->isa_level != MIPS_CPU_ISA_M32R1 && - c->isa_level != MIPS_CPU_ISA_M32R2 && - c->isa_level != MIPS_CPU_ISA_M64R1 && - c->isa_level != MIPS_CPU_ISA_M64R2) + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -19,7 +19,7 @@ #include <asm/mach-au1x00/au1000.h> #include <asm/tlbmisc.h> -#ifdef CONFIG_DEBUG_PCI +#ifdef CONFIG_PCI_DEBUG #define DBG(x...) printk(KERN_DEBUG x) #else #define DBG(x...) 
do {} while (0) @@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, if (status & (1 << 29)) { *data = 0xffffffff; error = -1; - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", access_type, bus->number, device); } else if ((status >> 28) & 0xf) { DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index b06c7360b1c6..428da175d073 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -8,7 +8,7 @@ config MN10300 select HAVE_ARCH_KGDB select GENERIC_ATOMIC64 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 014a6482ed4c..9ab3bf2eca8d 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -9,10 +9,9 @@ config OPENRISC select OF_EARLY_FLATTREE select IRQ_DOMAIN select HAVE_MEMBLOCK - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select HAVE_ARCH_TRACEHOOK select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS select GENERIC_IRQ_CHIP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index a9ff712a2864..0339181bf3ac 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -21,7 +21,7 @@ config PARISC select GENERIC_STRNCPY_FROM_USER select SYSCTL_ARCH_UNALIGN_ALLOW select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_RELA select CLONE_BACKWARDS select TTY # Needed for pdc_cons.c diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01d95e2f0581..113e28206503 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER endif # Use long jumps instead of long branches (needed if your linker fails to -# link a too big vmlinux executable) -cflags-$(CONFIG_MLONGCALLS) += -mlong-calls +# link a too big vmlinux executable). Not enabled for building modules. +ifdef CONFIG_MLONGCALLS +KBUILD_CFLAGS_KERNEL += -mlong-calls +endif # select which processor to optimise for cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 79f694f3ad9b..f0e2784e7cca 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -140,7 +140,10 @@ static inline void *kmap(struct page *page) return page_address(page); } -#define kunmap(page) kunmap_parisc(page_address(page)) +static inline void kunmap(struct page *page) +{ + kunmap_parisc(page_address(page)); +} static inline void *kmap_atomic(struct page *page) { diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fad29f9..1e40d7f86be3 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -16,6 +16,8 @@ #include <asm/processor.h> #include <asm/cache.h> +extern spinlock_t pa_dbit_lock; + /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. 
For the return value to be meaningful, ADDR must be >= @@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #define set_pte_at(mm, addr, ptep, pteval) \ do { \ + unsigned long flags; \ + spin_lock_irqsave(&pa_dbit_lock, flags); \ set_pte(ptep, pteval); \ purge_tlb_entries(mm, addr); \ + spin_unlock_irqrestore(&pa_dbit_lock, flags); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP + pte_t pte; + unsigned long flags; + if (!pte_young(*ptep)) return 0; - return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); -#else - pte_t pte = *ptep; - if (!pte_young(pte)) + + spin_lock_irqsave(&pa_dbit_lock, flags); + pte = *ptep; + if (!pte_young(pte)) { + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 0; - set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); + } + set_pte(ptep, pte_mkold(pte)); + purge_tlb_entries(vma->vm_mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return 1; -#endif } -extern spinlock_t pa_dbit_lock; - struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; + unsigned long flags; - spin_lock(&pa_dbit_lock); + spin_lock_irqsave(&pa_dbit_lock, flags); old_pte = *ptep; pte_clear(mm,addr,ptep); - spin_unlock(&pa_dbit_lock); + purge_tlb_entries(mm, addr); + spin_unlock_irqrestore(&pa_dbit_lock, flags); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_SMP - unsigned long new, old; - - do { - old = pte_val(*ptep); - new = pte_val(pte_wrprotect(__pte (old))); - } while (cmpxchg((unsigned long *) ptep, old, new) != old); + unsigned long flags; + spin_lock_irqsave(&pa_dbit_lock, flags); + set_pte(ptep, pte_wrprotect(*ptep)); purge_tlb_entries(mm, addr); -#else - pte_t old_pte = *ptep; - set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); -#endif + spin_unlock_irqrestore(&pa_dbit_lock, flags); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4ba2c93770f1..e0a82358517e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -181,30 +181,24 @@ struct exception_data { #if !defined(CONFIG_64BIT) #define __put_kernel_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%1)" \ - "\n2:\tstw %3,4(%1)\n\t" \ + "\n2:\tstw %R2,4(%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) #define __put_user_asm64(__val,ptr) do { \ - u64 __val64 = (u64)(__val); \ - u32 hi = (__val64) >> 32; \ - u32 lo = (__val64) & 0xffffffff; \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%%sr3,%1)" \ - "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ + "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ : "=r"(__pu_err) \ - : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 
4b12890642eb..83ded26cad06 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) /* Note: purge_tlb_entries can be called at startup with no context. */ - /* Disable preemption while we play with %sr1. */ - preempt_disable(); - mtsp(mm->context, 1); purge_tlb_start(flags); + mtsp(mm->context, 1); pdtlb(addr); pitlb(addr); purge_tlb_end(flags); - preempt_enable(); } EXPORT_SYMBOL(purge_tlb_entries); diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6795dc6c995f..568b2c61ea02 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -120,11 +120,13 @@ extern void __ashrdi3(void); extern void __ashldi3(void); extern void __lshrdi3(void); extern void __muldi3(void); +extern void __ucmpdi2(void); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__ucmpdi2); asmlinkage void * __canonicalize_funcptr_for_compare(void *); EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5f2e6904d14a..5651536ac733 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,6 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ + ucmpdi2.o obj-y := iomap.o diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c new file mode 100644 index 000000000000..149c016f32c5 --- /dev/null +++ b/arch/parisc/lib/ucmpdi2.c @@ -0,0 +1,25 @@ +#include <linux/module.h> + +union ull_union { + unsigned long long ull; + struct { + unsigned int high; + unsigned int low; + } ui; +}; + +int __ucmpdi2(unsigned long long a, unsigned long long b) +{ + union ull_union au = {.ull = a}; + union ull_union bu = {.ull = b}; + + if (au.ui.high < bu.ui.high) + return 0; + else if (au.ui.high > bu.ui.high) + return 2; + if (au.ui.low < bu.ui.low) + return 0; + else if (au.ui.low > bu.ui.low) + return 2; + return 1; +} diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b89d7eb730a2..ea5bb045983a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -90,6 +90,7 @@ config GENERIC_GPIO config PPC bool default y + select BINFMT_ELF select OF select OF_EARLY_FLATTREE select HAVE_FTRACE_MCOUNT_RECORD @@ -98,7 +99,7 @@ config PPC select HAVE_FUNCTION_GRAPH_TRACER select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB - select HAVE_VIRT_TO_BUS if !PPC64 + select VIRT_TO_BUS if !PPC64 select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 2fdb47a19efd..b59e06f507ea 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -343,17 +343,16 @@ extern void slb_set_size(u16 size); /* * VSID allocation (256MB segment) * - * We first generate a 38-bit "proto-VSID". For kernel addresses this - * is equal to the ESID | 1 << 37, for user addresses it is: - * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) + * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated + * from mmu context id and effective segment id of the address. 
* - * This splits the proto-VSID into the below range - * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range - * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range - * - * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 - * That is, we assign half of the space to user processes and half - * to the kernel. + * For user processes max context id is limited to ((1ul << 19) - 5) + * for kernel space, we use the top 4 context ids to map address as below + * NOTE: each context only support 64TB now. + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: @@ -363,41 +362,49 @@ extern void slb_set_size(u16 size); * VSID_MULTIPLIER is prime, so in particular it is * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * Because the modulus is 2^n-1 we can compute it efficiently without - * a divide or extra multiply (see below). - * - * This scheme has several advantages over older methods: - * - * - We have VSIDs allocated for every kernel address - * (i.e. everything above 0xC000000000000000), except the very top - * segment, which simplifies several things. + * a divide or extra multiply (see below). The scramble function gives + * robust scattering in the hash table (at least based on some initial + * results). * - * - We allow for USER_ESID_BITS significant bits of ESID and - * CONTEXT_BITS bits of context for user addresses. - * i.e. 64T (46 bits) of address space for up to half a million contexts. + * We also consider VSID 0 special. We use VSID 0 for slb entries mapping + * bad address. This enables us to consolidate bad address handling in + * hash_page. * - * - The scramble function gives robust scattering in the hash - * table (at least based on some initial results). The previous - * method was more susceptible to pathological cases giving excessive - * hash collisions. + * We also need to avoid the last segment of the last context, because that + * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 + * because of the modulo operation in vsid scramble. But the vmemmap + * (which is what uses region 0xf) will never be close to 64TB in size + * (it's 56 bytes per page of system memory). */ +#define CONTEXT_BITS 19 +#define ESID_BITS 18 +#define ESID_BITS_1T 6 + +/* + * 256MB segment + * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments + * available for user + kernel mapping. The top 4 contexts are used for + * kernel mapping. Each segment contains 2^28 bytes. Each + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts + * (19 == 37 + 28 - 46). + */ +#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) + /* * This should be computed such that protovosid * vsid_mulitplier * doesn't overflow 64 bits. 
It should also be co-prime to vsid_modulus */ #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_256M 38 +#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_1T 26 +#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T) #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) -#define CONTEXT_BITS 19 -#define USER_ESID_BITS 18 -#define USER_ESID_BITS_1T 6 -#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) +#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT)) /* * This macro generates asm code to compute the VSID scramble @@ -421,7 +428,8 @@ extern void slb_set_size(u16 size); srdi rx,rt,VSID_BITS_##size; \ clrldi rt,rt,(64-VSID_BITS_##size); \ add rt,rt,rx; /* add high and low bits */ \ - /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ + /* NOTE: explanation based on VSID_BITS_##size = 36 \ + * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ * 2^36-1+2^28-1. That in particular means that if r3 >= \ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ * the bit clear, r3 already has the answer we want, if it \ @@ -513,34 +521,6 @@ typedef struct { }) #endif /* 1 */ -/* - * This is only valid for addresses >= PAGE_OFFSET - * The proto-VSID space is divided into two class - * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 - * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 - * - * With KERNEL_START at 0xc000000000000000, the proto vsid for - * the kernel ends up with 0xc00000000 (36 bits). With 64TB - * support we need to have kernel proto-VSID in the - * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. - */ -static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) -{ - unsigned long proto_vsid; - /* - * We need to make sure proto_vsid for the kernel is - * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) - */ - if (ssize == MMU_SEGSIZE_256M) { - proto_vsid = ea >> SID_SHIFT; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); - return vsid_scramble(proto_vsid, 256M); - } - proto_vsid = ea >> SID_SHIFT_1T; - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); - return vsid_scramble(proto_vsid, 1T); -} - /* Returns the segment size indicator for a user address */ static inline int user_segment_size(unsigned long addr) { @@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr) return MMU_SEGSIZE_256M; } -/* This is only valid for user addresses (which are below 2^44) */ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, int ssize) { + /* + * Bad address. 
We return VSID 0 for that + */ + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) + return 0; + if (ssize == MMU_SEGSIZE_256M) - return vsid_scramble((context << USER_ESID_BITS) + return vsid_scramble((context << ESID_BITS) | (ea >> SID_SHIFT), 256M); - return vsid_scramble((context << USER_ESID_BITS_1T) + return vsid_scramble((context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T), 1T); } +/* + * This is only valid for addresses >= PAGE_OFFSET + * + * For kernel space, we use the top 4 context ids to map address as below + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] + */ +static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) +{ + unsigned long context; + + /* + * kernel take the top 4 context from the available range + */ + context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; + return get_vsid(context, ea, ssize); +} #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 75a3d71b895d..19599ef352bc 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf0adb7..04d69c4a5ac2 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -304,7 +304,7 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything else left to do? */ - SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ + SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite @@ -657,7 +657,7 @@ resume_kernel: /* Clear _TIF_EMULATE_STACK_STORE flag */ lis r11,_TIF_EMULATE_STACK_STORE@h addi r5,r9,TI_FLAGS - ldarx r4,0,r5 +0: ldarx r4,0,r5 andc r4,r4,r11 stdcx. 
r4,0,r5 bne- 0b diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index f3eab8594d9f..d44a571e45a7 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -23,8 +23,10 @@ #include <asm/code-patching.h> #include <asm/machdep.h> +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) extern void epapr_ev_idle(void); extern u32 epapr_ev_idle_start[]; +#endif bool epapr_paravirt_enabled; @@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void) for (i = 0; i < (len / 4); i++) { patch_instruction(epapr_hypercall_start + i, insts[i]); +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) patch_instruction(epapr_ev_idle_start + i, insts[i]); +#endif } +#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) if (of_get_property(hyper_node, "has-idle", NULL)) ppc_md.power_save = epapr_ev_idle; +#endif epapr_paravirt_enabled = true; diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 87ef8f5ee5bc..56bd92362ce1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1066,78 +1066,6 @@ unrecov_user_slb: #endif /* __DISABLED__ */ -/* - * r13 points to the PACA, r9 contains the saved CR, - * r12 contain the saved SRR1, SRR0 is still ready for return - * r3 has the faulting address - * r9 - r13 are saved in paca->exslb. - * r3 is saved in paca->slb_r3 - * We assume we aren't going to take any exceptions during this procedure. - */ -_GLOBAL(slb_miss_realmode) - mflr r10 -#ifdef CONFIG_RELOCATABLE - mtctr r11 -#endif - - stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ - std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - - bl .slb_allocate_realmode - - /* All done -- return from exception. */ - - ld r10,PACA_EXSLB+EX_LR(r13) - ld r3,PACA_EXSLB+EX_R3(r13) - lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ - - mtlr r10 - - andi. r10,r12,MSR_RI /* check for unrecoverable exception */ - beq- 2f - -.machine push -.machine "power4" - mtcrf 0x80,r9 - mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ -.machine pop - - RESTORE_PPR_PACA(PACA_EXSLB, r9) - ld r9,PACA_EXSLB+EX_R9(r13) - ld r10,PACA_EXSLB+EX_R10(r13) - ld r11,PACA_EXSLB+EX_R11(r13) - ld r12,PACA_EXSLB+EX_R12(r13) - ld r13,PACA_EXSLB+EX_R13(r13) - rfid - b . /* prevent speculative execution */ - -2: mfspr r11,SPRN_SRR0 - ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10,unrecov_slb) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - rfid - b . - -unrecov_slb: - EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) - DISABLE_INTS - bl .save_nvgprs -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception - b 1b - - -#ifdef CONFIG_PPC_970_NAP -power4_fixup_nap: - andc r9,r9,r10 - std r9,TI_LOCAL_FLAGS(r11) - ld r10,_LINK(r1) /* make idle task do the */ - std r10,_NIP(r1) /* equivalent of a blr */ - blr -#endif - .align 7 .globl alignment_common alignment_common: @@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler) /* + * r13 points to the PACA, r9 contains the saved CR, + * r12 contain the saved SRR1, SRR0 is still ready for return + * r3 has the faulting address + * r9 - r13 are saved in paca->exslb. + * r3 is saved in paca->slb_r3 + * We assume we aren't going to take any exceptions during this procedure. + */ +_GLOBAL(slb_miss_realmode) + mflr r10 +#ifdef CONFIG_RELOCATABLE + mtctr r11 +#endif + + stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. 
frame */ + std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + + bl .slb_allocate_realmode + + /* All done -- return from exception. */ + + ld r10,PACA_EXSLB+EX_LR(r13) + ld r3,PACA_EXSLB+EX_R3(r13) + lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ + + mtlr r10 + + andi. r10,r12,MSR_RI /* check for unrecoverable exception */ + beq- 2f + +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_PPR_PACA(PACA_EXSLB, r9) + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + rfid + b . /* prevent speculative execution */ + +2: mfspr r11,SPRN_SRR0 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10,unrecov_slb) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) + mtspr SPRN_SRR1,r10 + rfid + b . + +unrecov_slb: + EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) + DISABLE_INTS + bl .save_nvgprs +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b + + +#ifdef CONFIG_PPC_970_NAP +power4_fixup_nap: + andc r9,r9,r10 + std r9,TI_LOCAL_FLAGS(r11) + ld r10,_LINK(r1) /* make idle task do the */ + std r10,_NIP(r1) /* equivalent of a blr */ + blr +#endif + +/* * Hash table stuff */ .align 7 @@ -1452,20 +1452,36 @@ do_ste_alloc: _GLOBAL(do_stab_bolted) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ + mfspr r11,SPRN_DAR /* ea */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. r9,r11,4,(63 - 46 - 4) + li r9,0 /* VSID = 0 for bad address */ + bne- 0f + + /* + * Calculate VSID: + * This is the kernel vsid, we take the top for context from + * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * Here we know that (ea >> 60) == 0xc + */ + lis r9,(MAX_USER_CONTEXT + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT + 1)@l + + srdi r10,r11,SID_SHIFT + rldimi r10,r9,ESID_BITS,0 /* proto vsid */ + ASM_VSID_SCRAMBLE(r10, r9, 256M) + rldic r9,r10,12,16 /* r9 = vsid << 12 */ + +0: /* Hash to the primary group */ ld r10,PACASTABVIRT(r13) - mfspr r11,SPRN_DAR - srdi r11,r11,28 + srdi r11,r11,SID_SHIFT rldimi r10,r11,7,52 /* r10 = first ste of the group */ - /* Calculate VSID */ - /* This is a kernel address, so protovsid = ESID | 1 << 37 */ - li r9,0x1 - rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 - ASM_VSID_SCRAMBLE(r11, r9, 256M) - rldic r9,r11,12,16 /* r9 = vsid << 12 */ - /* Search the primary group for a free entry */ 1: ld r11,0(r10) /* Test valid bit of the current ste */ andi. 
r11,r11,0x80 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545fdde1..16e77a81ab4f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) new->thread.regs->msr |= (MSR_FP | new->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&new->thread); new->thread.regs->msr |= MSR_VEC; } +#endif /* We may as well turn on VSX too since all the state is restored now */ if (msr & MSR_VSX) new->thread.regs->msr |= MSR_VSX; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7f7fb7fd991b..13f8d168b3f1 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2832,11 +2832,13 @@ static void unreloc_toc(void) { } #else -static void __reloc_toc(void *tocstart, unsigned long offset, - unsigned long nr_entries) +static void __reloc_toc(unsigned long offset, unsigned long nr_entries) { unsigned long i; - unsigned long *toc_entry = (unsigned long *)tocstart; + unsigned long *toc_entry; + + /* Get the start of the TOC by using r2 directly. */ + asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); for (i = 0; i < nr_entries; i++) { *toc_entry = *toc_entry + offset; @@ -2850,8 +2852,7 @@ static void reloc_toc(void) unsigned long nr_entries = (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); - /* Need to add offset to get at __prom_init_toc_start */ - __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); + __reloc_toc(offset, nr_entries); mb(); } @@ -2864,8 +2865,7 @@ static void unreloc_toc(void) mb(); - /* __prom_init_toc_start has been relocated, no need to add offset */ - __reloc_toc(__prom_init_toc_start, -offset, nr_entries); + __reloc_toc(-offset, nr_entries); } #endif #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 245c1b6a0858..f9b30c68ba47 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child, brk.address = bp_info->addr & ~7UL; brk.type = HW_BRK_TYPE_TRANSLATE; + brk.len = 8; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e245b4..95068bf569ad 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return 0; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f8543cb57..c1794286098c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, do_load_up_transact_fpu(¤t->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } +#ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(¤t->thread); regs->msr |= MSR_VEC; } +#endif return err; } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace657ce..2da67e7a16d5 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -309,6 +309,7 @@ 
_GLOBAL(tm_recheckpoint) or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ mtmsr r5 +#ifdef CONFIG_ALTIVEC /* FP and VEC registers: These are recheckpointed from thread.fpr[] * and thread.vr[] respectively. The thread.transact_fpr[] version * is more modern, and will be loaded subsequently by any FPUnavailable @@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint) REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ ld r5, THREAD_VRSAVE(r3) mtspr SPRN_VRSAVE, r5 +#endif dont_restore_vec: andi. r0, r4, MSR_FP diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index ead58e317294..5d7d29a313eb 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) vcpu3s->context_id[0] = err; vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) - << USER_ESID_BITS) - 1; - vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + << ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd43655f..33db48a8ce24 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -26,17 +26,20 @@ #define E500_PID_NUM 3 #define E500_TLB_NUM 2 -#define E500_TLB_VALID 1 -#define E500_TLB_BITMAP 2 +/* entry is mapped somewhere in host TLB */ +#define E500_TLB_VALID (1 << 0) +/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ +#define E500_TLB_BITMAP (1 << 1) +/* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 2) struct tlbe_ref { - pfn_t pfn; - unsigned int flags; /* E500_TLB_* */ + pfn_t pfn; /* valid only for TLB0, except briefly */ + unsigned int flags; /* E500_TLB_* */ }; struct tlbe_priv { - struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ + struct tlbe_ref ref; }; #ifdef CONFIG_KVM_E500V2 @@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 { unsigned int gtlb_nv[E500_TLB_NUM]; - /* - * information associated with each host TLB entry -- - * TLB1 only for now. If/when guest TLB1 entries can be - * mapped with host TLB0, this will be used for that too. - * - * We don't want to use this for guest TLB0 because then we'd - * have the overhead of doing the translation again even if - * the entry is still in the guest TLB (e.g. we swapped out - * and back, and our host TLB entries got evicted). 
- */ - struct tlbe_ref *tlb_refs[E500_TLB_NUM]; unsigned int host_tlb1_nv; u32 svr; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edfb9a9b..1c6a9d729df4 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ - if (!(ref->flags & E500_TLB_VALID)) - return; + if (!(ref->flags & E500_TLB_VALID)) { + WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), + "%s: flags %x\n", __func__, ref->flags); + WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); + } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; @@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, pfn_t pfn) { ref->pfn = pfn; - ref->flags = E500_TLB_VALID; + ref->flags |= E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); @@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { + /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } @@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { - int tlbsel = 0; - int i; - - for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->gtlb_priv[tlbsel][i].ref; - kvmppc_e500_ref_release(ref); - } -} - -static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) -{ - int stlbsel = 1; + int tlbsel; int i; - kvmppc_e500_tlbil_all(vcpu_e500); - - for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { - struct tlbe_ref *ref = - &vcpu_e500->tlb_refs[stlbsel][i]; - kvmppc_e500_ref_release(ref); + for (tlbsel = 0; tlbsel <= 1; tlbsel++) { + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } } - - clear_tlb_privs(vcpu_e500); } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - clear_tlb_refs(vcpu_e500); + kvmppc_e500_tlbil_all(vcpu_e500); + clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } @@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } - /* Drop old ref and setup new one. 
*/ - kvmppc_e500_ref_release(ref); kvmppc_e500_ref_setup(ref, gtlbe, pfn); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, @@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; - vcpu_e500->tlb_refs[1][sesel] = *ref; - vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { - unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; + unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } - vcpu_e500->h2g_tlb1_rmap[sesel] = esel; + + vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; + vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; + vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; + WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } @@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { - struct tlbe_ref ref; + struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; - ref.flags = 0; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, - &ref); + ref); if (r) return r; @@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, } /* Otherwise map into TLB1 */ - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; @@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - /* Triggers after clear_tlb_refs or on initial mapping */ + /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { @@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; - vcpu_e500->tlb_refs[0] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[0]) - goto err; - - vcpu_e500->tlb_refs[1] = - kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, - GFP_KERNEL); - if (!vcpu_e500->tlb_refs[1]) - goto err; - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) - goto err; + return -EINVAL; return 0; - -err: - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); - return -EINVAL; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); - kfree(vcpu_e500->tlb_refs[0]); - kfree(vcpu_e500->tlb_refs[1]); } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26e65fb..2f4baa074b2e 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } +static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); + void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); - if (vcpu->arch.oldpir != 
mfspr(SPRN_PIR)) + if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || + __get_cpu_var(last_vcpu_on_cpu) != vcpu) { kvmppc_e500_tlbil_all(vcpu_e500); + __get_cpu_var(last_vcpu_on_cpu) = vcpu; + } kvmppc_load_guest_fp(vcpu); } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 1b6e1271719f..f410c3e12c1e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; + /* + * If we hit a bad address return error. + */ + if (!vsid) + return -1; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; @@ -759,6 +764,8 @@ void __init early_init_mmu(void) /* Initialize stab / SLB management */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); + else + stab_initialize(get_paca()->stab_real); } #ifdef CONFIG_SMP @@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); - if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { - DBG_LOW(" out of pgtable range !\n"); - return 1; - } - /* Get region & vsid */ switch (REGION_ID(ea)) { case USER_REGION_ID: @@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); + /* Bad address. */ + if (!vsid) { + DBG_LOW("Bad address!\n"); + return 1; + } /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) @@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* Get VSID */ ssize = user_segment_size(ea); vsid = get_vsid(mm->context.id, ea, ssize); + if (!vsid) + return; /* Hash doesn't like irqs */ local_irq_save(flags); @@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + /* Don't create HPTE entries for bad address */ + if (!vsid) + return; ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 40bc5b0ace54..d1d1b92c5b99 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -29,15 +29,6 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); -/* - * 256MB segment - * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments - * available for user mappings. Each segment contains 2^28 bytes. Each - * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts - * (19 == 37 + 28 - 46). 
- */ -#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) - int __init_new_context(void) { int index; @@ -56,7 +47,7 @@ again: else if (err) return err; - if (index > MAX_CONTEXT) { + if (index > MAX_USER_CONTEXT) { spin_lock(&mmu_context_lock); ida_remove(&mmu_context_ida, index); spin_unlock(&mmu_context_lock); diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e212a271c7a4..654258f165ae 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -61,7 +61,7 @@ #endif #ifdef CONFIG_PPC_STD_MMU_64 -#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) +#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif #endif diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 1a16ca227757..17aa6dfceb34 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -31,10 +31,15 @@ * No other registers are examined or changed. */ _GLOBAL(slb_allocate_realmode) - /* r3 = faulting address */ + /* + * check for bad kernel/user address + * (ea & ~REGION_MASK) >= PGTABLE_RANGE + */ + rldicr. r9,r3,4,(63 - 46 - 4) + bne- 8f srdi r9,r3,60 /* get region */ - srdi r10,r3,28 /* get esid */ + srdi r10,r3,SID_SHIFT /* get esid */ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ @@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) */ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) @@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) _GLOBAL(slb_miss_kernel_load_io) li r11,0 6: - li r9,0x1 /* - * for 1T we shift 12 bits more. slb_finish_load_1T will do - * the necessary adjustment + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 + * r9 = region id. */ - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l + BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T -0: /* user address: proto-VSID = context << 15 | ESID. First check - * if the address is within the boundaries of the user region - */ - srdi. r9,r10,USER_ESID_BITS - bne- 8f /* invalid ea bits set */ - - +0: /* when using slices, we extract the psize off the slice bitmaps * and then we need to get the sllp encoding off the mmu_psize_defs * array. 
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - rldimi r10,r9,USER_ESID_BITS,0 -BEGIN_FTR_SECTION bge slb_finish_load_1T END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ li r10,0 /* BAD_VSID */ + li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ b slb_finish_load @@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) /* get context to calculate proto-VSID */ ld r9,PACACONTEXTID(r13) - rldimi r10,r9,USER_ESID_BITS,0 - /* fall through slb_finish_load */ #endif /* __DISABLED__ */ @@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) /* * Finish loading of an SLB entry and return * - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET + * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET */ slb_finish_load: + rldimi r10,r9,ESID_BITS,0 ASM_VSID_SCRAMBLE(r10,r9,256M) /* * bits above VSID_BITS_256M need to be ignored from r10 @@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) /* * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. * - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 + * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 */ slb_finish_load_1T: - srdi r10,r10,40-28 /* get 1T ESID */ + srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ + rldimi r10,r9,ESID_BITS_1T,0 ASM_VSID_SCRAMBLE(r10,r9,1T) /* * bits above VSID_BITS_1T need to be ignored from r10 diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 0d82ef50dc3f..023ec8a13f38 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } + WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index b554879bd31e..3c475d6267c7 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = { .attrs = power7_events_attr, }; +PMU_FORMAT_ATTR(event, "config:0-19"); + +static struct attribute *power7_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +struct attribute_group power7_pmu_format_group = { + .name = "format", + .attrs = power7_pmu_format_attr, +}; + static const struct attribute_group *power7_pmu_attr_groups[] = { + &power7_pmu_format_group, &power7_pmu_events_group, NULL, }; diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 611e92f291c4..7179726ba5c5 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data) return IRQ_HANDLED; }; -static int __devinit gpio_halt_probe(struct platform_device *pdev) +static int gpio_halt_probe(struct platform_device *pdev) { enum of_gpio_flags flags; struct device_node *node = pdev->dev.of_node; @@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev) return 0; } -static int __devexit gpio_halt_remove(struct platform_device *pdev) +static int 
gpio_halt_remove(struct platform_device *pdev) { if (halt_node) { int gpio = of_get_gpio(halt_node, 0); @@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = { .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, - .remove = __devexit_p(gpio_halt_remove), + .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index cea2f09c4241..18e3b76c78d7 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -124,9 +124,8 @@ config 6xx select PPC_HAVE_PMU_SUPPORT config POWER3 - bool depends on PPC64 && PPC_BOOK3S - default y if !POWER4_ONLY + def_bool y config POWER4 depends on PPC64 && PPC_BOOK3S @@ -145,8 +144,7 @@ config TUNE_CELL but somewhat slower on other machines. This option only changes the scheduling of instructions, not the selection of instructions itself, so the resulting kernel will keep running on all other - machines. When building a kernel that is supposed to run only - on Cell, you should also select the POWER4_ONLY option. + machines. # this is temp to handle compat with arch=ppc config 8xx diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355a..299731e9036b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. + */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 4b505370a1d5..bda6ba6f3cf5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -134,7 +134,7 @@ config S390 select HAVE_SYSCALL_WRAPPERS select HAVE_UID16 if 32BIT select HAVE_VIRT_CPU_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select INIT_ALL_POSSIBLE select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA @@ -375,19 +375,6 @@ config PACK_STACK Say Y if you are unsure. -config SMALL_STACK - def_bool n - prompt "Use 8kb for kernel stack instead of 16kb" - depends on PACK_STACK && 64BIT && !LOCKDEP - help - If you say Y here and the compiler supports the -mkernel-backchain - option the kernel will use a smaller kernel stack size. The reduced - size is 8kb instead of 16kb. This allows to run more threads on a - system and reduces the pressure on the memory management for higher - order page allocations. - - Say N if you are unsure. 
- config CHECK_STACK def_bool y prompt "Detect kernel stack overflow" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 7e3ce78d4290..a7d68a467ce8 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls ifeq ($(call cc-option-yn,-mkernel-backchain),y) cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK -cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -ifdef CONFIG_SMALL_STACK -STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) -endif endif # new style option for packed stacks ifeq ($(call cc-option-yn,-mpacked-stack),y) cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK -cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK -ifdef CONFIG_SMALL_STACK -STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) ) -endif endif ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 9fd4a40c6752..bb5dd496614f 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) int hypfs_dbfs_init(void) { dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); - if (IS_ERR(dbfs_dir)) - return PTR_ERR(dbfs_dir); - return 0; + return PTR_RET(dbfs_dir); } void hypfs_dbfs_exit(void) diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 15422933c60b..4d8604e311f3 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -61,8 +61,6 @@ extern const char _sb_findmap[]; #ifndef CONFIG_64BIT -#define __BITOPS_ALIGN 3 -#define __BITOPS_WORDSIZE 32 #define __BITOPS_OR "or" #define __BITOPS_AND "nr" #define __BITOPS_XOR "xr" @@ -81,8 +79,6 @@ extern const char _sb_findmap[]; #else /* CONFIG_64BIT */ -#define __BITOPS_ALIGN 7 -#define __BITOPS_WORDSIZE 64 #define __BITOPS_OR "ogr" #define __BITOPS_AND "ngr" #define __BITOPS_XOR "xgr" @@ -101,8 +97,7 @@ extern const char _sb_findmap[]; #endif /* CONFIG_64BIT */ -#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) -#define __BITOPS_BARRIER() asm volatile("" : : : "memory") +#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) #ifdef CONFIG_SMP /* @@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make OR mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); } @@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make AND mask */ - mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); + mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); /* Do the atomic update. 
*/ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); } @@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make XOR mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); } @@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make OR/test mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); - __BITOPS_BARRIER(); + barrier(); return (old & mask) != 0; } @@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make AND/test mask */ - mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); + mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); /* Do the atomic update. */ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); - __BITOPS_BARRIER(); + barrier(); return (old ^ new) != 0; } @@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) addr = (unsigned long) ptr; /* calculate address for CS */ - addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; + addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; /* make XOR/test mask */ - mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); + mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* Do the atomic update. 
*/ __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); - __BITOPS_BARRIER(); + barrier(); return (old & mask) != 0; } #endif /* CONFIG_SMP */ @@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " oc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); @@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr |= 1 << (nr & 7); } @@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " nc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); @@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr &= ~(1 << (nr & 7)); } @@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); asm volatile( " xc %O0(1,%R0),%1" : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); @@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) { unsigned long addr; - addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); *(unsigned char *) addr ^= 1 << (nr & 7); } @@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " oc %O0(1,%R0),%1" @@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " nc %O0(1,%R0),%1" @@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(unsigned char *) addr; asm volatile( " xc %O0(1,%R0),%1" @@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr unsigned long addr; unsigned char ch; - addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); + addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); ch = *(volatile unsigned char *) addr; return (ch >> (nr & 7)) & 1; } @@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr static inline int 
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { return (((volatile char *) addr) - [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; + [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0; } #define test_bit(nr,addr) \ @@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { set = __flo_word(0, *p & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit_left(p, size); @@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * __ffz_word returns __BITOPS_WORDSIZE + * __ffz_word returns BITS_PER_LONG * if no zero bit is present in the word. */ set = __ffz_word(bit, *p >> bit); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_zero_bit(p, size); @@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * __ffs_word returns __BITOPS_WORDSIZE + * __ffs_word returns BITS_PER_LONG * if no one bit is present in the word. */ set = __ffs_word(0, *p & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit(p, size); @@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * s390 version of ffz returns __BITOPS_WORDSIZE + * s390 version of ffz returns BITS_PER_LONG * if no zero bit is present in the word. 
*/ set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_zero_bit_le(p, size); @@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size, if (offset >= size) return size; - bit = offset & (__BITOPS_WORDSIZE - 1); + bit = offset & (BITS_PER_LONG - 1); offset -= bit; size -= offset; - p = addr + offset / __BITOPS_WORDSIZE; + p = addr + offset / BITS_PER_LONG; if (bit) { /* - * s390 version of ffz returns __BITOPS_WORDSIZE + * s390 version of ffz returns BITS_PER_LONG * if no zero bit is present in the word. */ set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); if (set >= size) return size + offset; - if (set < __BITOPS_WORDSIZE) + if (set < BITS_PER_LONG) return set + offset; - offset += __BITOPS_WORDSIZE; - size -= __BITOPS_WORDSIZE; + offset += BITS_PER_LONG; + size -= BITS_PER_LONG; p++; } return offset + find_first_bit_le(p, size); diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index e6061617a50b..f201af8be580 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) extern struct ccw_device *ccw_device_probe_console(void); -extern int ccw_device_force_console(void); +extern void ccw_device_wait_idle(struct ccw_device *); +extern int ccw_device_force_console(struct ccw_device *); int ccw_device_siosl(struct ccw_device *); diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index ad2b924167d7..ffb898961c8d 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, return 0; } -extern void wait_cons_dev(void); - extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index f8c6df6cd1f0..c1e7c646727c 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -70,6 +70,22 @@ typedef u32 compat_ulong_t; typedef u64 compat_u64; typedef u32 compat_uptr_t; +typedef struct { + u32 mask; + u32 addr; +} __aligned(8) psw_compat_t; + +typedef struct { + psw_compat_t psw; + u32 gprs[NUM_GPRS]; + u32 acrs[NUM_ACRS]; + u32 orig_gpr2; +} s390_compat_regs; + +typedef struct { + u32 gprs_high[NUM_GPRS]; +} s390_compat_regs_high; + struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; @@ -124,18 +140,33 @@ struct compat_flock64 { }; struct compat_statfs { - s32 f_type; - s32 f_bsize; - s32 f_blocks; - s32 f_bfree; - s32 f_bavail; - s32 f_files; - s32 f_ffree; + u32 f_type; + u32 f_bsize; + u32 f_blocks; + u32 f_bfree; + u32 f_bavail; + u32 f_files; + u32 f_ffree; + compat_fsid_t f_fsid; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[4]; +}; + +struct compat_statfs64 { + u32 f_type; + u32 f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; compat_fsid_t f_fsid; - s32 f_namelen; - s32 f_frsize; - s32 f_flags; - s32 f_spare[5]; + u32 f_namelen; + u32 f_frsize; + u32 f_flags; + u32 f_spare[4]; }; #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff @@ -248,8 +279,6 @@ static inline int is_compat_task(void) 
return is_32bit_task(); } -#endif - static inline void __user *arch_compat_alloc_user_space(long len) { unsigned long stack; @@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len) return (void __user *) (stack - len); } +#endif + struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index f1eddd150dd7..c879fad404c8 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -12,6 +12,7 @@ #ifndef _ASM_S390_CPU_MF_H #define _ASM_S390_CPU_MF_H +#include <linux/errno.h> #include <asm/facility.h> #define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 8d4847191ecc..dc9200ca32ed 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -34,6 +34,8 @@ struct arsb { u32 reserved[4]; } __packed; +#define EQC_WR_PROHIBIT 22 + struct msb { u8 fmt:4; u8 oc:4; @@ -96,11 +98,13 @@ struct scm_device { #define OP_STATE_TEMP_ERR 2 #define OP_STATE_PERM_ERR 3 +enum scm_event {SCM_CHANGE, SCM_AVAIL}; + struct scm_driver { struct device_driver drv; int (*probe) (struct scm_device *scmdev); int (*remove) (struct scm_device *scmdev); - void (*notify) (struct scm_device *scmdev); + void (*notify) (struct scm_device *scmdev, enum scm_event event); void (*handler) (struct scm_device *scmdev, void *data, int error); }; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 1bfdf24b85a2..78f4f8711d58 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -119,6 +119,8 @@ */ #include <asm/ptrace.h> +#include <asm/compat.h> +#include <asm/syscall.h> #include <asm/user.h> typedef s390_fp_regs elf_fpregset_t; @@ -180,18 +182,31 @@ extern unsigned long elf_hwcap; extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) -#ifdef CONFIG_64BIT +#ifndef CONFIG_COMPAT +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table; \ +} while (0) +#else /* CONFIG_COMPAT */ #define SET_PERSONALITY(ex) \ do { \ if (personality(current->personality) != PER_LINUX32) \ set_personality(PER_LINUX | \ (current->personality & ~PER_MASK)); \ - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ set_thread_flag(TIF_31BIT); \ - else \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table_emu; \ + } else { \ clear_thread_flag(TIF_31BIT); \ + current_thread_info()->sys_call_table = \ + (unsigned long) &sys_call_table; \ + } \ } while (0) -#endif /* CONFIG_64BIT */ +#endif /* CONFIG_COMPAT */ #define STACK_RND_MASK 0x7ffUL diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb32185ce1..379d96e2105e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); #define ioremap_nocache(addr, size) ioremap(addr, size) #define ioremap_wc ioremap_nocache -/* TODO: s390 cannot support io_remap_pfn_range... 
*/ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 05333b7f0469..6c1801235db9 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) struct zpci_dev *zpci_alloc_device(void); int zpci_create_device(struct zpci_dev *); int zpci_enable_device(struct zpci_dev *); +int zpci_disable_device(struct zpci_dev *); void zpci_stop_device(struct zpci_dev *); void zpci_free_device(struct zpci_dev *); int zpci_scan_device(struct zpci_dev *); diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h index 6bbec4265b6e..1ca5d1047c71 100644 --- a/arch/s390/include/asm/pci_debug.h +++ b/arch/s390/include/asm/pci_debug.h @@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id; extern debug_info_t *pci_debug_err_id; #ifdef CONFIG_PCI_DEBUG -#define zpci_dbg(fmt, args...) \ - do { \ - if (pci_debug_msg_id->level >= 2) \ - debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\ - } while (0) +#define zpci_dbg(imp, fmt, args...) \ + debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) #else /* !CONFIG_PCI_DEBUG */ -#define zpci_dbg(fmt, args...) do { } while (0) +#define zpci_dbg(imp, fmt, args...) do { } while (0) #endif #define zpci_err(text...) \ diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 1486a98d5dad..e6a2bdd4d705 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -1,10 +1,6 @@ #ifndef _ASM_S390_PCI_INSN_H #define _ASM_S390_PCI_INSN_H -#include <linux/delay.h> - -#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ - /* Load/Store status codes */ #define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 #define ZPCI_PCI_ST_FUNC_IN_ERR 8 @@ -82,199 +78,12 @@ struct zpci_fib { u64 reserved7; } __packed; -/* Modify PCI Function Controls */ -static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) -{ - u8 cc; - - asm volatile ( - " .insn rxy,0xe300000000d0,%[req],%[fib]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) - : : "cc"); - *status = req >> 24 & 0xff; - return cc; -} - -static inline int mpcifc_instr(u64 req, struct zpci_fib *fib) -{ - u8 cc, status; - - do { - cc = __mpcifc(req, fib, &status); - if (cc == 2) - msleep(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d\n", - __func__, cc, status); - return (cc) ? -EIO : 0; -} - -/* Refresh PCI Translations */ -static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) -{ - register u64 __addr asm("2") = addr; - register u64 __range asm("3") = range; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d30000,%[fn],%[addr]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [fn] "+d" (fn) - : [addr] "d" (__addr), "d" (__range) - : "cc"); - *status = fn >> 24 & 0xff; - return cc; -} - -static inline int rpcit_instr(u64 fn, u64 addr, u64 range) -{ - u8 cc, status; - - do { - cc = __rpcit(fn, addr, range, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", - __func__, cc, status, addr, range); - return (cc) ? 
-EIO : 0; -} - -/* Store PCI function controls */ -static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status) -{ - u64 fn = (u64) handle << 32 | space << 16; - u8 cc; - - asm volatile ( - " .insn rxy,0xe300000000d4,%[fn],%[fib]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib) - : : "cc"); - *status = fn >> 24 & 0xff; - return cc; -} - -/* Set Interruption Controls */ -static inline void sic_instr(u16 ctl, char *unused, u8 isc) -{ - asm volatile ( - " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" - : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); -} - -/* PCI Load */ -static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status) -{ - register u64 __req asm("2") = req; - register u64 __offset asm("3") = offset; - u64 __data; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d20000,%[data],%[req]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req) - : "d" (__offset) - : "cc"); - *status = __req >> 24 & 0xff; - *data = __data; - return cc; -} - -static inline int pcilg_instr(u64 *data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcilg(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) { - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - /* TODO: on IO errors set data to 0xff... - * here or in users of pcilg (le conversion)? - */ - } - return (cc) ? -EIO : 0; -} - -/* PCI Store */ -static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status) -{ - register u64 __req asm("2") = req; - register u64 __offset asm("3") = offset; - u8 cc; - - asm volatile ( - " .insn rre,0xb9d00000,%[data],%[req]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (__req) - : "d" (__offset), [data] "d" (data) - : "cc"); - *status = __req >> 24 & 0xff; - return cc; -} - -static inline int pcistg_instr(u64 data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcistg(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - return (cc) ? -EIO : 0; -} - -/* PCI Store Block */ -static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) -{ - u8 cc; - - asm volatile ( - " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [req] "+d" (req) - : [offset] "d" (offset), [data] "Q" (*data) - : "cc"); - *status = req >> 24 & 0xff; - return cc; -} - -static inline int pcistb_instr(const u64 *data, u64 req, u64 offset) -{ - u8 cc, status; - - do { - cc = __pcistb(data, req, offset, &status); - if (cc == 2) - udelay(ZPCI_INSN_BUSY_DELAY); - } while (cc == 2); - if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); - return (cc) ? 
-EIO : 0; -} +int s390pci_mod_fc(u64 req, struct zpci_fib *fib); +int s390pci_refresh_trans(u64 fn, u64 addr, u64 range); +int s390pci_load(u64 *data, u64 req, u64 offset); +int s390pci_store(u64 data, u64 req, u64 offset); +int s390pci_store_block(const u64 *data, u64 req, u64 offset); +void set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 5fd81f31d6c7..83a9caa6ae53 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ u64 data; \ int rc; \ \ - rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \ + rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \ if (rc) \ data = -1ULL; \ return (RETTYPE) data; \ @@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ u64 data = (VALTYPE) val; \ \ - pcistg_instr(data, req, ZPCI_OFFSET(addr)); \ + s390pci_store(data, req, ZPCI_OFFSET(addr)); \ } zpci_read(8, u64) @@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len val = 0; /* let FW report error */ break; } - return pcistg_instr(val, req, offset); + return s390pci_store(val, req, offset); } static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) { u64 data; - u8 cc; + int cc; + + cc = s390pci_load(&data, req, offset); + if (cc) + goto out; - cc = pcilg_instr(&data, req, offset); switch (len) { case 1: *((u8 *) dst) = (u8) data; @@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) *((u64 *) dst) = (u64) data; break; } +out: return cc; } static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) { - return pcistb_instr(data, req, offset); + return s390pci_store_block(data, req, offset); } static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..4a64c0e5428f 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; (((unsigned long)(vaddr)) &zero_page_mask)))) #define __HAVE_COLOR_ZERO_PAGE +/* TODO: s390 cannot support io_remap_pfn_range... 
*/ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* @@ -344,6 +348,7 @@ extern unsigned long MODULES_END; #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ /* Bits in the segment table entry */ +#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ @@ -759,6 +764,8 @@ void gmap_disable(struct gmap *gmap); int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long length); int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); +unsigned long __gmap_translate(unsigned long address, struct gmap *); +unsigned long gmap_translate(unsigned long address, struct gmap *); unsigned long __gmap_fault(unsigned long address, struct gmap *); unsigned long gmap_fault(unsigned long address, struct gmap *); void gmap_discard(unsigned long from, unsigned long to, struct gmap *); @@ -1531,7 +1538,8 @@ extern int s390_enable_sie(void); /* * No page table caches to initialise */ -#define pgtable_cache_init() do { } while (0) +static inline void pgtable_cache_init(void) { } +static inline void check_pgt_cache(void) { } #include <asm-generic/pgtable.h> diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 94e749c90230..6b499870662f 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t); extern void show_code(struct pt_regs *regs); extern void print_fn_code(unsigned char *code, unsigned long len); -extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]); +extern int insn_to_mnemonic(unsigned char *instruction, char *buf, + unsigned int len); unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *) \ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 3ee5da3bc10c..559512a455da 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -9,9 +9,7 @@ #include <uapi/asm/ptrace.h> #ifndef __ASSEMBLY__ -#ifndef __s390x__ -#else /* __s390x__ */ -#endif /* __s390x__ */ + extern long psw_kernel_bits; extern long psw_user_bits; @@ -77,8 +75,6 @@ struct per_struct_kernel { #define PER_CONTROL_SUSPENSION 0x00400000UL #define PER_CONTROL_ALTERATION 0x00200000UL -#ifdef __s390x__ -#endif /* __s390x__ */ /* * These are defined as per linux/ptrace.h, which see. */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index fe7b99759e12..cd29d2f4e4f3 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -23,6 +23,7 @@ * type here is what we want [need] for both 32 bit and 64 bit systems. 
*/ extern const unsigned int sys_call_table[]; +extern const unsigned int sys_call_table_emu[]; static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 9e2cfe0349c3..eb5f64d26d06 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -14,13 +14,8 @@ #define THREAD_ORDER 1 #define ASYNC_ORDER 1 #else /* CONFIG_64BIT */ -#ifndef __SMALL_STACK #define THREAD_ORDER 2 #define ASYNC_ORDER 2 -#else -#define THREAD_ORDER 1 -#define ASYNC_ORDER 1 -#endif #endif /* CONFIG_64BIT */ #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) @@ -41,6 +36,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ + unsigned long sys_call_table; /* System call table address */ unsigned int cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 1d8fe2b17ef6..6b32af30878c 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_mm(struct mm_struct * mm) { - if (unlikely(cpumask_empty(mm_cpumask(mm)))) - return; /* * If the machine has IDTE we prefer to do a per mm flush * on all cpus instead of doing a local flush if the mm diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index a5ca214b34fd..3aa9f1ec5b29 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -215,12 +215,6 @@ typedef struct unsigned long addr; } __attribute__ ((aligned(8))) psw_t; -typedef struct -{ - __u32 mask; - __u32 addr; -} __attribute__ ((aligned(8))) psw_compat_t; - #ifndef __s390x__ #define PSW_MASK_PER 0x40000000UL @@ -295,20 +289,6 @@ typedef struct unsigned long orig_gpr2; } s390_regs; -typedef struct -{ - psw_compat_t psw; - __u32 gprs[NUM_GPRS]; - __u32 acrs[NUM_ACRS]; - __u32 orig_gpr2; -} s390_compat_regs; - -typedef struct -{ - __u32 gprs_high[NUM_GPRS]; -} s390_compat_regs_high; - - /* * Now for the user space program event recording (trace) definitions. * The following structures are used only for the ptrace interface, don't diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index 5acca0a34c20..a61d538756f2 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -7,9 +7,6 @@ #ifndef _S390_STATFS_H #define _S390_STATFS_H -#ifndef __s390x__ -#include <asm-generic/statfs.h> -#else /* * We can't use <asm-generic/statfs.h> because in 64-bit mode * we mix ints of different sizes in our struct statfs. 
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t; #endif struct statfs { - int f_type; - int f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; + unsigned int f_type; + unsigned int f_bsize; + unsigned long f_blocks; + unsigned long f_bfree; + unsigned long f_bavail; + unsigned long f_files; + unsigned long f_ffree; __kernel_fsid_t f_fsid; - int f_namelen; - int f_frsize; - int f_flags; - int f_spare[4]; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[4]; }; struct statfs64 { - int f_type; - int f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; + unsigned int f_type; + unsigned int f_bsize; + unsigned long f_blocks; + unsigned long f_bfree; + unsigned long f_bavail; + unsigned long f_files; + unsigned long f_ffree; __kernel_fsid_t f_fsid; - int f_namelen; - int f_frsize; - int f_flags; - int f_spare[4]; + unsigned int f_namelen; + unsigned int f_frsize; + unsigned int f_flags; + unsigned int f_spare[4]; }; -struct compat_statfs64 { - __u32 f_type; - __u32 f_bsize; - __u64 f_blocks; - __u64 f_bfree; - __u64 f_bavail; - __u64 f_files; - __u64 f_ffree; - __kernel_fsid_t f_fsid; - __u32 f_namelen; - __u32 f_frsize; - __u32 f_flags; - __u32 f_spare[4]; -}; - -#endif /* __s390x__ */ #endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2ac311ef5c9b..1386fcaf4ef6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -14,16 +14,25 @@ endif CFLAGS_smp.o := -Wno-nonnull # +# Disable tailcall optimizations for stack / callchain walking functions +# since this might generate broken code when accessing register 15 and +# passing its content to other functions. +# +CFLAGS_stacktrace.o += -fno-optimize-sibling-calls +CFLAGS_dumpstack.o += -fno-optimize-sibling-calls + +# # Pass UTS_MACHINE for user_regset definition # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w -obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ - processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ - debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ - sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o +obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o +obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o +obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += dumpstack.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index fface87056eb..7a82f9f70100 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -35,6 +35,7 @@ int main(void) DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); DEFINE(__TI_flags, offsetof(struct thread_info, flags)); + DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 6de049fbe62d..c439ac9ced09 100644 --- a/arch/s390/kernel/compat_signal.c +++ 
b/arch/s390/kernel/compat_signal.c @@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* set extra registers only for synchronous signals */ regs->gprs[4] = regs->int_code & 127; regs->gprs[5] = regs->int_parm_long; + regs->gprs[6] = task_thread_info(current)->last_break; } /* Place signal number on stack to allow backtrace from handler. */ @@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, regs->gprs[2] = map_signal(sig); regs->gprs[3] = (__force __u64) &frame->info; regs->gprs[4] = (__force __u64) &frame->uc; + regs->gprs[5] = task_thread_info(current)->last_break; return 0; give_sigsegv: diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 3ad5e9540160..7f4a4a8c847c 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code) * insn_to_mnemonic - decode an s390 instruction * @instruction: instruction to decode * @buf: buffer to fill with mnemonic + * @len: length of buffer * * Decode the instruction at @instruction and store the corresponding - * mnemonic into @buf. + * mnemonic into @buf of length @len. * @buf is left unchanged if the instruction could not be decoded. * Returns: * %0 on success, %-ENOENT if the instruction was not found. */ -int insn_to_mnemonic(unsigned char *instruction, char buf[8]) +int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len) { struct insn *insn; @@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8]) if (!insn) return -ENOENT; if (insn->name[0] == '\0') - snprintf(buf, 8, "%s", + snprintf(buf, len, "%s", long_insn_name[(int) insn->name[1]]); else - snprintf(buf, 8, "%.5s", insn->name); + snprintf(buf, len, "%.5s", insn->name); return 0; } EXPORT_SYMBOL_GPL(insn_to_mnemonic); diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c new file mode 100644 index 000000000000..03dce39d01ee --- /dev/null +++ b/arch/s390/kernel/dumpstack.c @@ -0,0 +1,236 @@ +/* + * Stack dumping functions + * + * Copyright IBM Corp. 1999, 2013 + */ + +#include <linux/kallsyms.h> +#include <linux/hardirq.h> +#include <linux/kprobes.h> +#include <linux/utsname.h> +#include <linux/export.h> +#include <linux/kdebug.h> +#include <linux/ptrace.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <asm/processor.h> +#include <asm/debug.h> +#include <asm/ipl.h> + +#ifndef CONFIG_64BIT +#define LONG "%08lx " +#define FOURLONG "%08lx %08lx %08lx %08lx\n" +static int kstack_depth_to_print = 12; +#else /* CONFIG_64BIT */ +#define LONG "%016lx " +#define FOURLONG "%016lx %016lx %016lx %016lx\n" +static int kstack_depth_to_print = 20; +#endif /* CONFIG_64BIT */ + +/* + * For show_trace we have tree different stack to consider: + * - the panic stack which is used if the kernel stack has overflown + * - the asynchronous interrupt stack (cpu related) + * - the synchronous kernel stack (process related) + * The stack trace can start at any of the three stack and can potentially + * touch all of them. The order is: panic stack, async stack, sync stack. 
+ */ +static unsigned long +__show_trace(unsigned long sp, unsigned long low, unsigned long high) +{ + struct stack_frame *sf; + struct pt_regs *regs; + + while (1) { + sp = sp & PSW_ADDR_INSN; + if (sp < low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); + print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); + /* Follow the backchain. */ + while (1) { + low = sp; + sp = sf->back_chain & PSW_ADDR_INSN; + if (!sp) + break; + if (sp <= low || sp > high - sizeof(*sf)) + return sp; + sf = (struct stack_frame *) sp; + printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); + print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); + } + /* Zero backchain detected, check for interrupt frame. */ + sp = (unsigned long) (sf + 1); + if (sp <= low || sp > high - sizeof(*regs)) + return sp; + regs = (struct pt_regs *) sp; + printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); + print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); + low = sp; + sp = regs->gprs[15]; + } +} + +static void show_trace(struct task_struct *task, unsigned long *stack) +{ + register unsigned long __r15 asm ("15"); + unsigned long sp; + + sp = (unsigned long) stack; + if (!sp) + sp = task ? task->thread.ksp : __r15; + printk("Call Trace:\n"); +#ifdef CONFIG_CHECK_STACK + sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, + S390_lowcore.panic_stack); +#endif + sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, + S390_lowcore.async_stack); + if (task) + __show_trace(sp, (unsigned long) task_stack_page(task), + (unsigned long) task_stack_page(task) + THREAD_SIZE); + else + __show_trace(sp, S390_lowcore.thread_info, + S390_lowcore.thread_info + THREAD_SIZE); + if (!task) + task = current; + debug_show_held_locks(task); +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ + register unsigned long *__r15 asm ("15"); + unsigned long *stack; + int i; + + if (!sp) + stack = task ? (unsigned long *) task->thread.ksp : __r15; + else + stack = sp; + + for (i = 0; i < kstack_depth_to_print; i++) { + if (((addr_t) stack & (THREAD_SIZE-1)) == 0) + break; + if ((i * sizeof(long) % 32) == 0) + printk("%s ", i == 0 ? "" : "\n"); + printk(LONG, *stack++); + } + printk("\n"); + show_trace(task, sp); +} + +static void show_last_breaking_event(struct pt_regs *regs) +{ +#ifdef CONFIG_64BIT + printk("Last Breaking-Event-Address:\n"); + printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); + print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); +#endif +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + printk("CPU: %d %s %s %.*s\n", + task_thread_info(current)->cpu, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + printk("Process %s (pid: %d, task: %p, ksp: %p)\n", + current->comm, current->pid, current, + (void *) current->thread.ksp); + show_stack(NULL, NULL); +} +EXPORT_SYMBOL(dump_stack); + +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + +void show_registers(struct pt_regs *regs) +{ + char *mode; + + mode = user_mode(regs) ? 
"User" : "Krnl"; + printk("%s PSW : %p %p", + mode, (void *) regs->psw.mask, + (void *) regs->psw.addr); + print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, + regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); + printk(" " FOURLONG, + regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); + printk(" " FOURLONG, + regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); + printk(" " FOURLONG, + regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); + show_code(regs); +} + +void show_regs(struct pt_regs *regs) +{ + printk("CPU: %d %s %s %.*s\n", + task_thread_info(current)->cpu, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + printk("Process %s (pid: %d, task: %p, ksp: %p)\n", + current->comm, current->pid, current, + (void *) current->thread.ksp); + show_registers(regs); + /* Show stack backtrace if pt_regs is from kernel mode */ + if (!user_mode(regs)) + show_trace(NULL, (unsigned long *) regs->gprs[15]); + show_last_breaking_event(regs); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + + oops_enter(); + lgr_info_log(); + debug_stop_all(); + console_verbose(); + spin_lock_irq(&die_lock); + bust_spinlocks(1); + printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_SMP + printk("SMP "); +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC + printk("DEBUG_PAGEALLOC"); +#endif + printk("\n"); + notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); + print_modules(); + show_regs(regs); + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception: panic_on_oops"); + oops_exit(); + do_exit(SIGSEGV); +} diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 550228523267..4d5e6f8a7978 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE #define BASED(name) name-system_call(%r13) @@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT sra %r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: l %r15,\stack # load target stack -2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro ADD64 high,low,timer @@ -150,7 +151,7 @@ ENTRY(__switch_to) l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next lr %r15,%r5 - ahi %r15,STACK_SIZE # end of kernel stack of next + ahi %r15,STACK_INIT # end of kernel stack of next st 
%r3,__LC_CURRENT # store task struct of next st %r5,__LC_THREAD_INFO # store thread info of next st %r15,__LC_KERNEL_STACK # store end of kernel stack @@ -178,7 +179,6 @@ sysc_stm: l %r13,__LC_SVC_NEW_PSW+4 sysc_per: l %r15,__LC_KERNEL_STACK - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER @@ -188,6 +188,7 @@ sysc_vtime: mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC sysc_do_svc: oi __TI_flags+3(%r12),_TIF_SYSCALL + l %r10,__TI_sysc_table(%r12) # 31 bit system call table lh %r8,__PT_INT_CODE+2(%r11) sla %r8,2 # shift and test for svc0 jnz sysc_nr_ok @@ -198,7 +199,6 @@ sysc_do_svc: lr %r8,%r1 sla %r8,2 sysc_nr_ok: - l %r10,BASED(.Lsys_call_table) # 31 bit system call table xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) st %r2,__PT_ORIG_GPR2(%r11) st %r7,STACK_FRAME_OVERHEAD(%r15) @@ -359,11 +359,11 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jnz pgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER l %r15,__LC_KERNEL_STACK -2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stm %r0,%r7,__PT_R0(%r11) mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC stm %r8,%r9,__PT_PSW(%r11) @@ -485,7 +485,6 @@ io_work: # io_work_user: l %r1,__LC_KERNEL_STACK - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -636,7 +635,8 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER mcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA + stm %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 stm %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) l %r1,BASED(.Ldo_machine_check) @@ -645,7 +645,6 @@ mcck_skip: tm __PT_PSW+1(%r11),0x01 # returning to user ? 
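
Note on the entry.S hunks here: the interrupt and syscall paths stop subtracting STACK_FRAME_OVERHEAD + __PT_SIZE because every stack pointer stored in the lowcore now already points below a reserved pt_regs slot (see the setup.c and smp.c hunks further down). A rough C-level sketch of that invariant; STACK_TOP_OFFSET and kernel_stack_top() are illustrative names, the patch itself encodes the offset as STACK_INIT in entry.S and open-codes it elsewhere:

#include <linux/sched.h>
#include <asm/ptrace.h>		/* STACK_FRAME_OVERHEAD, struct pt_regs */

/* illustrative helper, not defined by the patch */
#define STACK_TOP_OFFSET(size) \
	((size) - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))

static unsigned long kernel_stack_top(struct task_struct *tsk)
{
	/* mirrors the new lc->kernel_stack value set in pcpu_attach_task() */
	return (unsigned long) task_stack_page(tsk) + STACK_TOP_OFFSET(THREAD_SIZE);
}
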
jno mcck_return l %r1,__LC_KERNEL_STACK # switch to kernel stack - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r15) @@ -673,6 +672,7 @@ mcck_panic: sra %r14,PAGE_SHIFT jz 0f l %r15,__LC_PANIC_STACK + j mcck_skip 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j mcck_skip @@ -713,12 +713,10 @@ ENTRY(restart_int_handler) */ stack_overflow: l %r15,__LC_PANIC_STACK # change to panic stack - ahi %r15,-__PT_SIZE # create pt_regs - stm %r0,%r7,__PT_R0(%r15) - stm %r8,%r9,__PT_PSW(%r15) + la %r11,STACK_FRAME_OVERHEAD(%r15) + stm %r0,%r7,__PT_R0(%r11) + stm %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(32,%r11),0(%r14) - lr %r15,%r11 - ahi %r15,-STACK_FRAME_OVERHEAD l %r1,BASED(1f) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) lr %r2,%r11 # pass pointer to pt_regs @@ -798,15 +796,14 @@ cleanup_system_call: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER # set up saved register 11 l %r15,__LC_KERNEL_STACK - ahi %r15,-__PT_SIZE - st %r15,12(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + st %r9,12(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC - stm %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC + stm %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC # setup saved register 15 - ahi %r15,-STACK_FRAME_OVERHEAD st %r15,28(%r11) # r15 stack pointer # set new psw address and exit l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 @@ -909,7 +906,6 @@ cleanup_idle_wait: .Ltrace_enter: .long do_syscall_trace_enter .Ltrace_exit: .long do_syscall_trace_exit .Lschedule_tail: .long schedule_tail -.Lsys_call_table: .long sys_call_table .Lsysc_per: .long sysc_per + 0x80000000 #ifdef CONFIG_TRACE_IRQFLAGS .Lhardirqs_on: .long trace_hardirqs_on_caller diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index c3a736a3ed44..aa0ab02e9595 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -7,6 +7,7 @@ #include <asm/cputime.h> extern void *restart_stack; +extern unsigned long suspend_zero_pages; void system_call(void); void pgm_check_handler(void); diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9c837c101297..4c17eece707e 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_PER_TRAP ) @@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) srag %r14,%r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: lg %r15,\stack # load target stack -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro UPDATE_VTIME scratch,enter_timer @@ -177,7 +178,7 @@ ENTRY(__switch_to) lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next lgr %r15,%r5 - aghi %r15,STACK_SIZE # end of kernel stack of next + aghi %r15,STACK_INIT # end of kernel stack of next stg %r3,__LC_CURRENT # store task struct of next stg %r5,__LC_THREAD_INFO # store thread info of 
next stg %r15,__LC_KERNEL_STACK # store end of kernel stack @@ -203,10 +204,8 @@ sysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call sysc_per: lg %r15,__LC_KERNEL_STACK - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER @@ -217,6 +216,7 @@ sysc_vtime: mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC sysc_do_svc: oi __TI_flags+7(%r12),_TIF_SYSCALL + lg %r10,__TI_sysc_table(%r12) # address of system call table llgh %r8,__PT_INT_CODE+2(%r11) slag %r8,%r8,2 # shift and test for svc 0 jnz sysc_nr_ok @@ -227,13 +227,6 @@ sysc_do_svc: sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,2 sysc_nr_ok: - larl %r10,sys_call_table # 64 bit system call table -#ifdef CONFIG_COMPAT - tm __TI_flags+5(%r12),(_TIF_31BIT>>16) - jno sysc_noemu - larl %r10,sys_call_table_emu # 31 bit system call table -sysc_noemu: -#endif xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) @@ -389,6 +382,7 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+3,0x80 # check for per exception jnz pgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER LAST_BREAK %r14 @@ -398,8 +392,7 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+2,0x02 # check for transaction abort jz 2f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) +2: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -526,7 +519,6 @@ io_work: # io_work_user: lg %r1,__LC_KERNEL_STACK - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -678,8 +670,9 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 mcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA - mvc __PT_R0(128,%r11),0(%r14) + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs @@ -687,7 +680,6 @@ mcck_skip: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno mcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) @@ -754,14 +746,12 @@ ENTRY(restart_int_handler) * Setup a pt_regs so that show_trace can provide a good call trace. 
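
Note on sysc_do_svc above: with __TI_sysc_table the handler picks up the system call table from thread_info with a single load instead of testing _TIF_31BIT on every system call. A hedged sketch of how such a per-task pointer could be initialized; the call site, the helper name and the unsigned long field type are assumptions, not shown in this excerpt:

#include <linux/compat.h>
#include <linux/thread_info.h>

extern const unsigned long sys_call_table[];
extern const unsigned long sys_call_table_emu[];	/* 31-bit table */

/* assumed call site: run once, when the task's personality is known */
static void set_syscall_table(struct thread_info *ti)
{
	ti->sys_call_table = is_compat_task() ?
		(unsigned long) sys_call_table_emu :
		(unsigned long) sys_call_table;
}
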
*/ stack_overflow: - lg %r11,__LC_PANIC_STACK # change to panic stack - aghi %r11,-__PT_SIZE # create pt_regs + lg %r15,__LC_PANIC_STACK # change to panic stack + la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(64,%r11),0(%r14) stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 - lgr %r15,%r11 - aghi %r15,-STACK_FRAME_OVERHEAD xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_overflow @@ -845,15 +835,14 @@ cleanup_system_call: mvc __TI_last_break(8,%r12),16(%r11) 0: # set up saved register r11 lg %r15,__LC_KERNEL_STACK - aghi %r15,-__PT_SIZE - stg %r15,24(%r11) # r11 pt_regs pointer + la %r9,STACK_FRAME_OVERHEAD(%r15) + stg %r9,24(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC - stmg %r0,%r7,__PT_R0(%r15) - mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC + stmg %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC # setup saved register r15 - aghi %r15,-STACK_FRAME_OVERHEAD stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,sysc_do_svc @@ -1010,6 +999,7 @@ sys_call_table: #ifdef CONFIG_COMPAT #define SYSCALL(esa,esame,emu) .long emu + .globl sys_call_table_emu sys_call_table_emu: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b3de27700016..ac2178161ec3 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -13,6 +13,7 @@ #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/debug_locks.h> +#include <linux/suspend.h> #include <asm/cio.h> #include <asm/setup.h> #include <asm/pgtable.h> @@ -67,6 +68,35 @@ void setup_regs(void) memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); } +/* + * PM notifier callback for kdump + */ +static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + if (crashk_res.start) + crash_map_reserved_pages(); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + if (crashk_res.start) + crash_unmap_reserved_pages(); + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int __init machine_kdump_pm_init(void) +{ + pm_notifier(machine_kdump_pm_cb, 0); + return 0; +} +arch_initcall(machine_kdump_pm_init); #endif /* diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5360de85ec7..0f419c5765c8 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -377,11 +377,14 @@ static void __init setup_lowcore(void) PSW_MASK_DAT | PSW_MASK_MCHECK; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; - lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; + lc->kernel_stack = ((unsigned long) &init_thread_union) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->async_stack = (unsigned long) - __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; + __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->panic_stack = (unsigned long) - __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; + __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->current_task = (unsigned long) 
init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; lc->machine_flags = S390_lowcore.machine_flags; @@ -571,6 +574,8 @@ static void __init setup_memory_end(void) /* Split remaining virtual space between 1:1 mapping & vmemmap array */ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ + tmp = SECTION_ALIGN_UP(tmp); tmp = VMALLOC_START - tmp * sizeof(struct page); tmp &= ~((vmax >> 11) - 1); /* align to page table level */ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 549c9d173c0f..8bde89eafd88 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); - lc->async_stack = pcpu->async_stack + ASYNC_SIZE; - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; + lc->async_stack = pcpu->async_stack + ASYNC_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->panic_stack = pcpu->panic_stack + PAGE_SIZE + - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->cpu_nr = cpu; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { @@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) struct _lowcore *lc = pcpu->lowcore; struct thread_info *ti = task_thread_info(tsk); - lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; + lc->kernel_stack = (unsigned long) task_stack_page(tsk) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->thread_info = (unsigned long) task_thread_info(tsk); lc->current_task = (unsigned long) tsk; lc->user_timer = ti->user_timer; @@ -810,8 +813,10 @@ void __init smp_prepare_boot_cpu(void) pcpu->state = CPU_STATE_CONFIGURED; pcpu->address = boot_cpu_address; pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; + pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); + pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE + + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index aa1494d0e380..c479d2f9605b 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -41,6 +41,7 @@ struct page_key_data { static struct page_key_data *page_key_data; static struct page_key_data *page_key_rp, *page_key_wp; static unsigned long page_key_rx, page_key_wx; +unsigned long suspend_zero_pages; /* * For each page in the hibernation image one additional byte is @@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn) return 0; } +/* + * PM notifier callback for suspend + */ +static int suspend_pm_cb(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER); + if (!suspend_zero_pages) + return NOTIFY_BAD; + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + free_pages(suspend_zero_pages, LC_ORDER); + break; + default: + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static int __init suspend_pm_init(void) +{ + 
pm_notifier(suspend_pm_cb, 0); + return 0; +} +arch_initcall(suspend_pm_init); + void save_processor_state(void) { /* swsusp_arch_suspend() actually saves all cpu register contents. diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index d4ca4e0617b5..c487be4cfc81 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend) /* Store prefix register on stack */ stpx __SF_EMPTY(%r15) - /* Save prefix register contents for lowcore */ - llgf %r4,__SF_EMPTY(%r15) + /* Save prefix register contents for lowcore copy */ + llgf %r10,__SF_EMPTY(%r15) /* Get pointer to save area */ lghi %r1,0x1000 @@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend) xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) + /* Save absolute zero pages */ + larl %r2,suspend_zero_pages + lg %r2,0(%r2) + lghi %r4,0 + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + + /* Copy lowcore to absolute zero lowcore */ lghi %r2,0 + lgr %r4,%r10 lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 @@ -248,8 +259,20 @@ restore_registers: /* Load old stack */ lg %r15,0x2f8(%r13) + /* Save prefix register */ + mvc __SF_EMPTY(4,%r15),0x318(%r13) + + /* Restore absolute zero pages */ + lghi %r2,0 + larl %r4,suspend_zero_pages + lg %r4,0(%r4) + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + /* Restore prefix register */ - spx 0x318(%r13) + spx __SF_EMPTY(%r15) /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 13dd63fba367..c5762324d9ee 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -12,49 +12,16 @@ * 'Traps.c' handles hardware traps and faults after we have saved some * state in 'asm.s'. 
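
Note on the machine_kexec.c and suspend.c hunks above: both use the same power-management hook, preparing resources on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE and releasing them on the matching PM_POST_* events. A generic sketch of that pattern; my_pm_cb and my_pm_init are placeholder names:

#include <linux/suspend.h>
#include <linux/init.h>

static int my_pm_cb(struct notifier_block *nb, unsigned long action, void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		/* allocate or map whatever must survive the transition */
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* undo the preparation */
		break;
	default:
		return NOTIFY_DONE;	/* not interested in other events */
	}
	return NOTIFY_OK;
}

static int __init my_pm_init(void)
{
	pm_notifier(my_pm_cb, 0);	/* wrapper around register_pm_notifier() */
	return 0;
}
arch_initcall(my_pm_init);
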
*/ -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/module.h> #include <linux/ptrace.h> -#include <linux/timer.h> +#include <linux/sched.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/seq_file.h> -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/kdebug.h> -#include <linux/kallsyms.h> -#include <linux/reboot.h> -#include <linux/kprobes.h> -#include <linux/bug.h> -#include <linux/utsname.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <linux/atomic.h> -#include <asm/mathemu.h> -#include <asm/cpcmd.h> -#include <asm/lowcore.h> -#include <asm/debug.h> -#include <asm/ipl.h> #include "entry.h" int show_unhandled_signals = 1; -#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) - -#ifndef CONFIG_64BIT -#define LONG "%08lx " -#define FOURLONG "%08lx %08lx %08lx %08lx\n" -static int kstack_depth_to_print = 12; -#else /* CONFIG_64BIT */ -#define LONG "%016lx " -#define FOURLONG "%016lx %016lx %016lx %016lx\n" -static int kstack_depth_to_print = 20; -#endif /* CONFIG_64BIT */ - static inline void __user *get_trap_ip(struct pt_regs *regs) { #ifdef CONFIG_64BIT @@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) #endif } -/* - * For show_trace we have tree different stack to consider: - * - the panic stack which is used if the kernel stack has overflown - * - the asynchronous interrupt stack (cpu related) - * - the synchronous kernel stack (process related) - * The stack trace can start at any of the three stack and can potentially - * touch all of them. The order is: panic stack, async stack, sync stack. - */ -static unsigned long -__show_trace(unsigned long sp, unsigned long low, unsigned long high) -{ - struct stack_frame *sf; - struct pt_regs *regs; - - while (1) { - sp = sp & PSW_ADDR_INSN; - if (sp < low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); - /* Follow the backchain. */ - while (1) { - low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; - if (!sp) - break; - if (sp <= low || sp > high - sizeof(*sf)) - return sp; - sf = (struct stack_frame *) sp; - printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); - print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); - } - /* Zero backchain detected, check for interrupt frame. */ - sp = (unsigned long) (sf + 1); - if (sp <= low || sp > high - sizeof(*regs)) - return sp; - regs = (struct pt_regs *) sp; - printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); - print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); - low = sp; - sp = regs->gprs[15]; - } -} - -static void show_trace(struct task_struct *task, unsigned long *stack) -{ - register unsigned long __r15 asm ("15"); - unsigned long sp; - - sp = (unsigned long) stack; - if (!sp) - sp = task ? 
task->thread.ksp : __r15; - printk("Call Trace:\n"); -#ifdef CONFIG_CHECK_STACK - sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, - S390_lowcore.panic_stack); -#endif - sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, - S390_lowcore.async_stack); - if (task) - __show_trace(sp, (unsigned long) task_stack_page(task), - (unsigned long) task_stack_page(task) + THREAD_SIZE); - else - __show_trace(sp, S390_lowcore.thread_info, - S390_lowcore.thread_info + THREAD_SIZE); - if (!task) - task = current; - debug_show_held_locks(task); -} - -void show_stack(struct task_struct *task, unsigned long *sp) -{ - register unsigned long * __r15 asm ("15"); - unsigned long *stack; - int i; - - if (!sp) - stack = task ? (unsigned long *) task->thread.ksp : __r15; - else - stack = sp; - - for (i = 0; i < kstack_depth_to_print; i++) { - if (((addr_t) stack & (THREAD_SIZE-1)) == 0) - break; - if ((i * sizeof(long) % 32) == 0) - printk("%s ", i == 0 ? "" : "\n"); - printk(LONG, *stack++); - } - printk("\n"); - show_trace(task, sp); -} - -static void show_last_breaking_event(struct pt_regs *regs) -{ -#ifdef CONFIG_64BIT - printk("Last Breaking-Event-Address:\n"); - printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); - print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); -#endif -} - -/* - * The architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); - -static inline int mask_bits(struct pt_regs *regs, unsigned long bits) -{ - return (regs->psw.mask & bits) / ((~bits + 1) & bits); -} - -void show_registers(struct pt_regs *regs) -{ - char *mode; - - mode = user_mode(regs) ? 
"User" : "Krnl"; - printk("%s PSW : %p %p", - mode, (void *) regs->psw.mask, - (void *) regs->psw.addr); - print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " - "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), - mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), - mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), - mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), - mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), - mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); -#ifdef CONFIG_64BIT - printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); -#endif - printk("\n%s GPRS: " FOURLONG, mode, - regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); - printk(" " FOURLONG, - regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); - printk(" " FOURLONG, - regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); - printk(" " FOURLONG, - regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); - - show_code(regs); -} - -void show_regs(struct pt_regs *regs) -{ - printk("CPU: %d %s %s %.*s\n", - task_thread_info(current)->cpu, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version); - printk("Process %s (pid: %d, task: %p, ksp: %p)\n", - current->comm, current->pid, current, - (void *) current->thread.ksp); - show_registers(regs); - /* Show stack backtrace if pt_regs is from kernel mode */ - if (!user_mode(regs)) - show_trace(NULL, (unsigned long *) regs->gprs[15]); - show_last_breaking_event(regs); -} - -static DEFINE_SPINLOCK(die_lock); - -void die(struct pt_regs *regs, const char *str) -{ - static int die_counter; - - oops_enter(); - lgr_info_log(); - debug_stop_all(); - console_verbose(); - spin_lock_irq(&die_lock); - bust_spinlocks(1); - printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); -#endif -#ifdef CONFIG_SMP - printk("SMP "); -#endif -#ifdef CONFIG_DEBUG_PAGEALLOC - printk("DEBUG_PAGEALLOC"); -#endif - printk("\n"); - notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); - print_modules(); - show_regs(regs); - bust_spinlocks(0); - add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - spin_unlock_irq(&die_lock); - if (in_interrupt()) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); - oops_exit(); - do_exit(SIGSEGV); -} - static inline void report_user_fault(struct pt_regs *regs, int signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h index 2b29e62351d3..53252d2d4720 100644 --- a/arch/s390/kvm/trace.h +++ b/arch/s390/kvm/trace.h @@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction, __entry->instruction, insn_to_mnemonic((unsigned char *) &__entry->instruction, - __entry->insn) ? + __entry->insn, sizeof(__entry->insn)) ? "unknown" : __entry->insn) ); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address * contains the (negative) exception code. 
*/ -static __always_inline unsigned long follow_table(struct mm_struct *mm, - unsigned long addr, int write) +#ifdef CONFIG_64BIT +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *ptep; + unsigned long *table = (unsigned long *)__pa(mm->pgd); + + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { + case _ASCE_TYPE_REGION1: + table = table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x39UL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION2: + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3aUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_REGION3: + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -0x3bUL; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + case _ASCE_TYPE_SEGMENT: + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) + return -0x10UL; + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { + if (write && (*table & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); + } + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + } + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) + return -0x11UL; + if (write && (*table & _PAGE_RO)) + return -0x04UL; + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); +} - pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return -0x3aUL; +#else /* CONFIG_64BIT */ - pud = pud_offset(pgd, addr); - if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return -0x3bUL; +static unsigned long follow_table(struct mm_struct *mm, + unsigned long address, int write) +{ + unsigned long *table = (unsigned long *)__pa(mm->pgd); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + table = table + ((address >> 20) & 0x7ff); + if (unlikely(*table & _SEGMENT_ENTRY_INV)) return -0x10UL; - if (pmd_large(*pmd)) { - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) - return -0x04UL; - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); - } - if (unlikely(pmd_bad(*pmd))) - return -0x10UL; - - ptep = pte_offset_map(pmd, addr); - if (!pte_present(*ptep)) + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); + table = table + ((address >> 12) & 0xff); + if (unlikely(*table & _PAGE_INVALID)) return -0x11UL; - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) + if (write && (*table & _PAGE_RO)) return -0x04UL; - - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); } +#endif /* CONFIG_64BIT */ + static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { @@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) static size_t clear_user_pt(size_t n, void __user *to) { - void *zpage = &empty_zero_page; + void *zpage = (void *) empty_zero_page; long done, size, ret; done = 0; diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 479e94282910..9d84a1feefef 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -458,12 +458,10 @@ static int __init cmm_init(void) if (rc) goto out_pm; cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); - rc = IS_ERR(cmm_thread_ptr) ? 
PTR_ERR(cmm_thread_ptr) : 0; - if (rc) - goto out_kthread; - return 0; + if (!IS_ERR(cmm_thread_ptr)) + return 0; -out_kthread: + rc = PTR_ERR(cmm_thread_ptr); unregister_pm_notifier(&cmm_power_notifier); out_pm: unregister_oom_notifier(&cmm_oom_nb); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2fb9e63b8fc4..047c3e4c59a2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs) int fault; trans_exc_code = regs->int_parm_long; - /* Protection exception is suppressing, decrement psw address. */ - regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); + /* + * Protection exceptions are suppressing, decrement psw address. + * The exception to this rule are aborted transactions, for these + * the PSW already points to the correct location. + */ + if (!(regs->int_code & 0x200)) + regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 49ce6bb2c641..9f9c315b4c07 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -63,10 +63,18 @@ static unsigned long __init setup_zero_pages(void) break; case 0x2097: /* z10 */ case 0x2098: /* z10 */ - default: + case 0x2817: /* z196 */ + case 0x2818: /* z196 */ order = 2; break; + case 0x2827: /* zEC12 */ + default: + order = 5; + break; } + /* Limit number of empty zero pages for small memory sizes */ + if (order > 2 && totalram_pages <= 16384) + order = 2; empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!empty_zero_page) diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index d21040ed5e59..80adfbf75065 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -9,31 +9,25 @@ #include <asm/pgtable.h> #include <asm/page.h> +static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) +{ + asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" + : [addr] "+a" (addr) : [skey] "d" (skey)); + return addr; +} + void storage_key_init_range(unsigned long start, unsigned long end) { - unsigned long boundary, function, size; + unsigned long boundary, size; while (start < end) { - if (MACHINE_HAS_EDAT2) { - /* set storage keys for a 2GB frame */ - function = 0x22000 | PAGE_DEFAULT_KEY; - size = 1UL << 31; - boundary = (start + size) & ~(size - 1); - if (boundary <= end) { - do { - start = pfmf(function, start); - } while (start < boundary); - continue; - } - } if (MACHINE_HAS_EDAT1) { /* set storage keys for a 1MB frame */ - function = 0x21000 | PAGE_DEFAULT_KEY; size = 1UL << 20; boundary = (start + size) & ~(size - 1); if (boundary <= end) { do { - start = pfmf(function, start); + start = sske_frame(start, PAGE_DEFAULT_KEY); } while (start < boundary); continue; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index ae44d2a34313..bd954e96f51c 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -379,75 +379,183 @@ out_unmap: } EXPORT_SYMBOL_GPL(gmap_map_segment); -/* - * this function is assumed to be called with mmap_sem held - */ -unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) +static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) { - unsigned long *table, vmaddr, segment; - struct mm_struct *mm; - struct gmap_pgtable *mp; - struct gmap_rmap *rmap; - struct vm_area_struct *vma; - struct page *page; - pgd_t *pgd; - pud_t 
*pud; - pmd_t *pmd; + unsigned long *table; - current->thread.gmap_addr = address; - mm = gmap->mm; - /* Walk the gmap address space page table */ table = gmap->table + ((address >> 53) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 42) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 31) & 0x7ff); if (unlikely(*table & _REGION_ENTRY_INV)) - return -EFAULT; + return ERR_PTR(-EFAULT); table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); table = table + ((address >> 20) & 0x7ff); + return table; +} + +/** + * __gmap_translate - translate a guest address to a user space address + * @address: guest address + * @gmap: pointer to guest mapping meta data structure + * + * Returns user space address which corresponds to the guest address or + * -EFAULT if no such mapping exists. + * This function does not establish potentially missing page table entries. + * The mmap_sem of the mm that belongs to the address space must be held + * when this function gets called. + */ +unsigned long __gmap_translate(unsigned long address, struct gmap *gmap) +{ + unsigned long *segment_ptr, vmaddr, segment; + struct gmap_pgtable *mp; + struct page *page; + current->thread.gmap_addr = address; + segment_ptr = gmap_table_walk(address, gmap); + if (IS_ERR(segment_ptr)) + return PTR_ERR(segment_ptr); /* Convert the gmap address to an mm address. */ - segment = *table; - if (likely(!(segment & _SEGMENT_ENTRY_INV))) { + segment = *segment_ptr; + if (!(segment & _SEGMENT_ENTRY_INV)) { page = pfn_to_page(segment >> PAGE_SHIFT); mp = (struct gmap_pgtable *) page->index; return mp->vmaddr | (address & ~PMD_MASK); } else if (segment & _SEGMENT_ENTRY_RO) { vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; - vma = find_vma(mm, vmaddr); - if (!vma || vma->vm_start > vmaddr) - return -EFAULT; - - /* Walk the parent mm page table */ - pgd = pgd_offset(mm, vmaddr); - pud = pud_alloc(mm, pgd, vmaddr); - if (!pud) - return -ENOMEM; - pmd = pmd_alloc(mm, pud, vmaddr); - if (!pmd) - return -ENOMEM; - if (!pmd_present(*pmd) && - __pte_alloc(mm, vma, pmd, vmaddr)) - return -ENOMEM; - /* pmd now points to a valid segment table entry. */ - rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); - if (!rmap) - return -ENOMEM; - /* Link gmap segment table entry location to page table. */ - page = pmd_page(*pmd); - mp = (struct gmap_pgtable *) page->index; - rmap->entry = table; - spin_lock(&mm->page_table_lock); + return vmaddr | (address & ~PMD_MASK); + } + return -EFAULT; +} +EXPORT_SYMBOL_GPL(__gmap_translate); + +/** + * gmap_translate - translate a guest address to a user space address + * @address: guest address + * @gmap: pointer to guest mapping meta data structure + * + * Returns user space address which corresponds to the guest address or + * -EFAULT if no such mapping exists. + * This function does not establish potentially missing page table entries. 
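
Note on the gmap changes above: __gmap_translate(), and the locked gmap_translate() wrapper defined right below, return either a host user space address or an IS_ERR_VALUE()-style error without creating missing page table entries. A hedged caller sketch; the helper name and the -EFAULT mapping are illustrative, not taken from the patch:

#include <linux/err.h>
#include <asm/pgtable.h>	/* gmap API declarations (location assumed) */

static unsigned long guest_to_user(struct gmap *gmap, unsigned long guest_addr)
{
	unsigned long useraddr;

	useraddr = gmap_translate(guest_addr, gmap);	/* takes mmap_sem itself */
	if (IS_ERR_VALUE(useraddr))
		return -EFAULT;		/* no user mapping exists for this guest address */
	return useraddr;		/* user space address in the gmap's host mm */
}
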
+ */ +unsigned long gmap_translate(unsigned long address, struct gmap *gmap) +{ + unsigned long rc; + + down_read(&gmap->mm->mmap_sem); + rc = __gmap_translate(address, gmap); + up_read(&gmap->mm->mmap_sem); + return rc; +} +EXPORT_SYMBOL_GPL(gmap_translate); + +static int gmap_connect_pgtable(unsigned long segment, + unsigned long *segment_ptr, + struct gmap *gmap) +{ + unsigned long vmaddr; + struct vm_area_struct *vma; + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct mm_struct *mm; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + mm = gmap->mm; + vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; + vma = find_vma(mm, vmaddr); + if (!vma || vma->vm_start > vmaddr) + return -EFAULT; + /* Walk the parent mm page table */ + pgd = pgd_offset(mm, vmaddr); + pud = pud_alloc(mm, pgd, vmaddr); + if (!pud) + return -ENOMEM; + pmd = pmd_alloc(mm, pud, vmaddr); + if (!pmd) + return -ENOMEM; + if (!pmd_present(*pmd) && + __pte_alloc(mm, vma, pmd, vmaddr)) + return -ENOMEM; + /* pmd now points to a valid segment table entry. */ + rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); + if (!rmap) + return -ENOMEM; + /* Link gmap segment table entry location to page table. */ + page = pmd_page(*pmd); + mp = (struct gmap_pgtable *) page->index; + rmap->entry = segment_ptr; + spin_lock(&mm->page_table_lock); + if (*segment_ptr == segment) { list_add(&rmap->list, &mp->mapper); - spin_unlock(&mm->page_table_lock); /* Set gmap segment table entry to page table. */ - *table = pmd_val(*pmd) & PAGE_MASK; - return vmaddr | (address & ~PMD_MASK); + *segment_ptr = pmd_val(*pmd) & PAGE_MASK; + rmap = NULL; + } + spin_unlock(&mm->page_table_lock); + kfree(rmap); + return 0; +} + +static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table) +{ + struct gmap_rmap *rmap, *next; + struct gmap_pgtable *mp; + struct page *page; + int flush; + + flush = 0; + spin_lock(&mm->page_table_lock); + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry_safe(rmap, next, &mp->mapper, list) { + *rmap->entry = + _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + list_del(&rmap->list); + kfree(rmap); + flush = 1; + } + spin_unlock(&mm->page_table_lock); + if (flush) + __tlb_flush_global(); +} + +/* + * this function is assumed to be called with mmap_sem held + */ +unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) +{ + unsigned long *segment_ptr, segment; + struct gmap_pgtable *mp; + struct page *page; + int rc; + + current->thread.gmap_addr = address; + segment_ptr = gmap_table_walk(address, gmap); + if (IS_ERR(segment_ptr)) + return -EFAULT; + /* Convert the gmap address to an mm address. */ + while (1) { + segment = *segment_ptr; + if (!(segment & _SEGMENT_ENTRY_INV)) { + /* Page table is present */ + page = pfn_to_page(segment >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + return mp->vmaddr | (address & ~PMD_MASK); + } + if (!(segment & _SEGMENT_ENTRY_RO)) + /* Nothing mapped in the gmap address space. 
*/ + break; + rc = gmap_connect_pgtable(segment, segment_ptr, gmap); + if (rc) + return rc; } return -EFAULT; } @@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) } EXPORT_SYMBOL_GPL(gmap_discard); -void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) -{ - struct gmap_rmap *rmap, *next; - struct gmap_pgtable *mp; - struct page *page; - int flush; - - flush = 0; - spin_lock(&mm->page_table_lock); - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - mp = (struct gmap_pgtable *) page->index; - list_for_each_entry_safe(rmap, next, &mp->mapper, list) { - *rmap->entry = - _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; - list_del(&rmap->list); - kfree(rmap); - flush = 1; - } - spin_unlock(&mm->page_table_lock); - if (flush) - __tlb_flush_global(); -} - static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, unsigned long vmaddr) { @@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table) { } -static inline void gmap_unmap_notifier(struct mm_struct *mm, - unsigned long *table) +static inline void gmap_disconnect_pgtable(struct mm_struct *mm, + unsigned long *table) { } @@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) unsigned int bit, mask; if (mm_has_pgste(mm)) { - gmap_unmap_notifier(mm, table); + gmap_disconnect_pgtable(mm, table); return page_table_free_pgste(table); } /* Free 1K/2K page table fragment of a 4K page */ @@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) mm = tlb->mm; if (mm_has_pgste(mm)) { - gmap_unmap_notifier(mm, table); + gmap_disconnect_pgtable(mm, table); table = (unsigned long *) (__pa(table) | FRAG_MASK); tlb_remove_table(tlb, table); return; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0972e91cced2..82f165f8078c 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp) if (!bpf_jit_enable) return; - addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL); + addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; - memset(addrs, 0, fp->len * sizeof(*addrs)); memset(&jit, 0, sizeof(cjit)); memset(&cjit, 0, sizeof(cjit)); diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index f0f426a113ce..086a2e37935d 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -2,5 +2,5 @@ # Makefile for the s390 PCI subsystem. 
# -obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \ - pci_sysfs.o pci_event.o pci_debug.o +obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ + pci_event.o pci_debug.o pci_insn.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 27b4c17855b9..e6f15b5d8b7d 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -99,9 +99,6 @@ static int __read_mostly aisb_max; static struct kmem_cache *zdev_irq_cache; static struct kmem_cache *zdev_fmb_cache; -debug_info_t *pci_debug_msg_id; -debug_info_t *pci_debug_err_id; - static inline int irq_to_msi_nr(unsigned int irq) { return irq & ZPCI_MSI_MASK; @@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, fib->aisb = (u64) bucket->aisb + aisb / 8; fib->aisbo = aisb & ZPCI_MSI_MASK; - rc = mpcifc_instr(req, fib); + rc = s390pci_mod_fc(req, fib); pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); free_page((unsigned long) fib); @@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args fib->iota = args->iota; fib->fmb_addr = args->fmb_addr; - rc = mpcifc_instr(req, fib); + rc = s390pci_mod_fc(req, fib); free_page((unsigned long) fib); return rc; } @@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev) if (zdev->fmb) return -EINVAL; - zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL); + zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL); if (!zdev->fmb) return -ENOMEM; - memset(zdev->fmb, 0, sizeof(*zdev->fmb)); WARN_ON((u64) zdev->fmb & 0xf); args.fmb_addr = virt_to_phys(zdev->fmb); @@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) u64 data; int rc; - rc = pcilg_instr(&data, req, offset); - data = data << ((8 - len) * 8); - data = le64_to_cpu(data); - if (!rc) + rc = s390pci_load(&data, req, offset); + if (!rc) { + data = data << ((8 - len) * 8); + data = le64_to_cpu(data); *val = (u32) data; - else + } else *val = 0xffffffff; return rc; } @@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) data = cpu_to_le64(data); data = data >> ((8 - len) * 8); - rc = pcistg_instr(data, req, offset); + rc = s390pci_store(data, req, offset); return rc; } @@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct zpci_dev *zdev = get_zdev_by_bus(bus); + int ret; if (!zdev || devfn != ZPCI_DEVFN) - return 0; - return zpci_cfg_load(zdev, where, val, size); + ret = -ENODEV; + else + ret = zpci_cfg_load(zdev, where, val, size); + + return ret; } static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct zpci_dev *zdev = get_zdev_by_bus(bus); + int ret; if (!zdev || devfn != ZPCI_DEVFN) - return 0; - return zpci_cfg_store(zdev, where, val, size); + ret = -ENODEV; + else + ret = zpci_cfg_store(zdev, where, val, size); + + return ret; } static struct pci_ops pci_root_ops = { @@ -474,7 +478,7 @@ scan: } /* enable interrupts again */ - sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); /* check again to not lose initiative */ rmb(); @@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev) } }; -static void zpci_unmap_resources(struct pci_dev *pdev) -{ - resource_size_t len; - int i; - - for (i = 0; i < PCI_BAR_COUNT; i++) { - len = pci_resource_len(pdev, i); - if (!len) - continue; - pci_iounmap(pdev, (void *) pdev->resource[i].start); - } -}; - struct 
zpci_dev *zpci_alloc_device(void) { struct zpci_dev *zdev; @@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev) kfree(zdev); } -/* Called on removal of pci_dev, leaves zpci and bus device */ -static void zpci_remove_device(struct pci_dev *pdev) -{ - struct zpci_dev *zdev = get_zdev(pdev); - - dev_info(&pdev->dev, "Removing device %u\n", zdev->domain); - zdev->state = ZPCI_FN_STATE_CONFIGURED; - zpci_dma_exit_device(zdev); - zpci_fmb_disable_device(zdev); - zpci_sysfs_remove_device(&pdev->dev); - zpci_unmap_resources(pdev); - list_del(&zdev->entry); /* can be called from init */ - zdev->pdev = NULL; -} - -static void zpci_scan_devices(void) -{ - struct zpci_dev *zdev; - - mutex_lock(&zpci_list_lock); - list_for_each_entry(zdev, &zpci_list, entry) - if (zdev->state == ZPCI_FN_STATE_CONFIGURED) - zpci_scan_device(zdev); - mutex_unlock(&zpci_list_lock); -} - /* * Too late for any s390 specific setup, since interrupts must be set up * already which requires DMA setup too and the pci scan will access the @@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) return 0; } -void pcibios_disable_device(struct pci_dev *pdev) -{ - zpci_remove_device(pdev); - pdev->sysdata = NULL; -} - int pcibios_add_platform_entries(struct pci_dev *pdev) { return zpci_sysfs_add_device(&pdev->dev); @@ -789,7 +748,7 @@ static int __init zpci_irq_init(void) spin_lock_init(&bucket->lock); /* set summary to 1 to be called every time for the ISC */ *zpci_irq_si = 1; - sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); return 0; out_ai: @@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) spin_unlock(&zpci_iomap_lock); } -static int zpci_create_device_bus(struct zpci_dev *zdev) +int pcibios_add_device(struct pci_dev *pdev) +{ + struct zpci_dev *zdev = get_zdev(pdev); + + zdev->pdev = pdev; + zpci_debug_init_device(zdev); + zpci_fmb_enable_device(zdev); + zpci_map_resources(zdev); + + return 0; +} + +static int zpci_scan_bus(struct zpci_dev *zdev) { struct resource *res; LIST_HEAD(resources); @@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev) pci_add_resource(&resources, res); } - zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, - zdev, &resources); + zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, + zdev, &resources); if (!zdev->bus) return -EIO; @@ -959,6 +930,13 @@ out: } EXPORT_SYMBOL_GPL(zpci_enable_device); +int zpci_disable_device(struct zpci_dev *zdev) +{ + zpci_dma_exit_device(zdev); + return clp_disable_fh(zdev); +} +EXPORT_SYMBOL_GPL(zpci_disable_device); + int zpci_create_device(struct zpci_dev *zdev) { int rc; @@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev) if (rc) goto out; - rc = zpci_create_device_bus(zdev); + if (zdev->state == ZPCI_FN_STATE_CONFIGURED) { + rc = zpci_enable_device(zdev); + if (rc) + goto out_free; + + zdev->state = ZPCI_FN_STATE_ONLINE; + } + rc = zpci_scan_bus(zdev); if (rc) - goto out_bus; + goto out_disable; mutex_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); @@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev) hotplug_ops->create_slot(zdev); mutex_unlock(&zpci_list_lock); - if (zdev->state == ZPCI_FN_STATE_STANDBY) - return 0; - - rc = zpci_enable_device(zdev); - if (rc) - goto out_start; return 0; -out_start: - mutex_lock(&zpci_list_lock); - list_del(&zdev->entry); - if (hotplug_ops) - hotplug_ops->remove_slot(zdev); - mutex_unlock(&zpci_list_lock); -out_bus: 
+out_disable: + if (zdev->state == ZPCI_FN_STATE_ONLINE) + zpci_disable_device(zdev); +out_free: zpci_free_domain(zdev); out: return rc; @@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev) goto out; } - zpci_debug_init_device(zdev); - zpci_fmb_enable_device(zdev); - zpci_map_resources(zdev); pci_bus_add_devices(zdev->bus); - /* now that pdev was added to the bus mark it as used */ - zdev->state = ZPCI_FN_STATE_ONLINE; return 0; - out: zpci_dma_exit_device(zdev); clp_disable_fh(zdev); @@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void) } EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); -unsigned int s390_pci_probe = 1; +unsigned int s390_pci_probe; EXPORT_SYMBOL_GPL(s390_pci_probe); char * __init pcibios_setup(char *str) { - if (!strcmp(str, "off")) { - s390_pci_probe = 0; + if (!strcmp(str, "on")) { + s390_pci_probe = 1; return NULL; } return str; @@ -1138,7 +1108,6 @@ static int __init pci_base_init(void) if (rc) goto out_find; - zpci_scan_devices(); return 0; out_find: diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index f339fe2feb15..bd34359d1546 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/delay.h> #include <linux/pci.h> +#include <asm/pci_debug.h> #include <asm/pci_clp.h> /* @@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured) struct zpci_dev *zdev; int rc; + zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured); zdev = zpci_alloc_device(); if (IS_ERR(zdev)) return PTR_ERR(zdev); @@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) *fh = rrb->response.fh; else { - pr_err("Set PCI FN failed with response: %x cc: %d\n", - rrb->response.hdr.rsp, rc); + zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc, + rrb->response.hdr.rsp); rc = -EIO; } clp_free_block(rrb); @@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as) if (!rc) /* Success -> store enabled handle in zdev */ zdev->fh = fh; + + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); return rc; } @@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev) if (!rc) /* Success -> store disabled handle in zdev */ zdev->fh = fh; - else - dev_err(&zdev->pdev->dev, - "Failed to disable fn handle: 0x%x\n", fh); + + zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); return rc; } diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index a5d07bc2a547..771b82359af4 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -11,12 +11,17 @@ #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/export.h> #include <linux/pci.h> #include <asm/debug.h> #include <asm/pci_dma.h> static struct dentry *debugfs_root; +debug_info_t *pci_debug_msg_id; +EXPORT_SYMBOL_GPL(pci_debug_msg_id); +debug_info_t *pci_debug_err_id; +EXPORT_SYMBOL_GPL(pci_debug_err_id); static char *pci_perf_names[] = { /* hardware counters */ @@ -168,7 +173,6 @@ int __init zpci_debug_init(void) return -EINVAL; debug_register_view(pci_debug_msg_id, &debug_sprintf_view); debug_set_level(pci_debug_msg_id, 3); - zpci_dbg("Debug view initialized\n"); /* error log */ pci_debug_err_id = debug_register("pci_error", 2, 1, 16); @@ -176,7 +180,6 @@ int __init zpci_debug_init(void) return -EINVAL; debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); debug_set_level(pci_debug_err_id, 6); - zpci_err("Debug view initialized\n"); debugfs_root = 
debugfs_create_dir("pci", NULL); return 0; diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index a547419907c3..f8e69d5bc0a9 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, * needs to be redone! */ goto no_refresh; - rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr, - nr_pages * PAGE_SIZE); + + rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, + nr_pages * PAGE_SIZE); no_refresh: spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); @@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, int flags = ZPCI_PTE_VALID; dma_addr_t dma_addr; - WARN_ON_ONCE(offset > PAGE_SIZE); - /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); iommu_page_index = dma_alloc_iommu(zdev, nr_pages); @@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); - return dma_addr + offset; + return dma_addr + (offset & ~PAGE_MASK); } out_free: diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c new file mode 100644 index 000000000000..22eeb9d7ffeb --- /dev/null +++ b/arch/s390/pci/pci_insn.c @@ -0,0 +1,202 @@ +/* + * s390 specific pci instructions + * + * Copyright IBM Corp. 2013 + */ + +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <asm/pci_insn.h> +#include <asm/processor.h> + +#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ + +/* Modify PCI Function Controls */ +static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) +{ + u8 cc; + + asm volatile ( + " .insn rxy,0xe300000000d0,%[req],%[fib]\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) + : : "cc"); + *status = req >> 24 & 0xff; + return cc; +} + +int s390pci_mod_fc(u64 req, struct zpci_fib *fib) +{ + u8 cc, status; + + do { + cc = __mpcifc(req, fib, &status); + if (cc == 2) + msleep(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d\n", + __func__, cc, status); + return (cc) ? -EIO : 0; +} + +/* Refresh PCI Translations */ +static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) +{ + register u64 __addr asm("2") = addr; + register u64 __range asm("3") = range; + u8 cc; + + asm volatile ( + " .insn rre,0xb9d30000,%[fn],%[addr]\n" + " ipm %[cc]\n" + " srl %[cc],28\n" + : [cc] "=d" (cc), [fn] "+d" (fn) + : [addr] "d" (__addr), "d" (__range) + : "cc"); + *status = fn >> 24 & 0xff; + return cc; +} + +int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) +{ + u8 cc, status; + + do { + cc = __rpcit(fn, addr, range, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", + __func__, cc, status, addr, range); + return (cc) ? 
-EIO : 0; +} + +/* Set Interruption Controls */ +void set_irq_ctrl(u16 ctl, char *unused, u8 isc) +{ + asm volatile ( + " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" + : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); +} + +/* PCI Load */ +static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) +{ + register u64 __req asm("2") = req; + register u64 __offset asm("3") = offset; + int cc = -ENXIO; + u64 __data; + + asm volatile ( + " .insn rre,0xb9d20000,%[data],%[req]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req) + : "d" (__offset) + : "cc"); + *status = __req >> 24 & 0xff; + if (!cc) + *data = __data; + + return cc; +} + +int s390pci_load(u64 *data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcilg(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? -EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_load); + +/* PCI Store */ +static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) +{ + register u64 __req asm("2") = req; + register u64 __offset asm("3") = offset; + int cc = -ENXIO; + + asm volatile ( + " .insn rre,0xb9d00000,%[data],%[req]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [req] "+d" (__req) + : "d" (__offset), [data] "d" (data) + : "cc"); + *status = __req >> 24 & 0xff; + return cc; +} + +int s390pci_store(u64 data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcistg(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? -EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_store); + +/* PCI Store Block */ +static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) +{ + int cc = -ENXIO; + + asm volatile ( + " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" + "0: ipm %[cc]\n" + " srl %[cc],28\n" + "1:\n" + EX_TABLE(0b, 1b) + : [cc] "+d" (cc), [req] "+d" (req) + : [offset] "d" (offset), [data] "Q" (*data) + : "cc"); + *status = req >> 24 & 0xff; + return cc; +} + +int s390pci_store_block(const u64 *data, u64 req, u64 offset) +{ + u8 status; + int cc; + + do { + cc = __pcistb(data, req, offset, &status); + if (cc == 2) + udelay(ZPCI_INSN_BUSY_DELAY); + } while (cc == 2); + + if (cc) + printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", + __func__, cc, status, req, offset); + return (cc > 0) ? 
-EIO : cc; +} +EXPORT_SYMBOL_GPL(s390pci_store_block); diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c index 0297931335e1..b097aed05a9b 100644 --- a/arch/s390/pci/pci_msi.c +++ b/arch/s390/pci/pci_msi.c @@ -18,8 +18,9 @@ /* mapping of irq numbers to msi_desc */ static struct hlist_head *msi_hash; -static unsigned int msihash_shift = 6; -#define msi_hashfn(nr) hash_long(nr, msihash_shift) +static const unsigned int msi_hash_bits = 8; +#define MSI_HASH_BUCKETS (1U << msi_hash_bits) +#define msi_hashfn(nr) hash_long(nr, msi_hash_bits) static DEFINE_SPINLOCK(msi_map_lock); @@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi, map->irq = nr; map->msi = msi; zdev->msi_map[nr & ZPCI_MSI_MASK] = map; + INIT_HLIST_NODE(&map->msi_chain); pr_debug("%s hashing irq: %u to bucket nr: %llu\n", __func__, nr, msi_hashfn(nr)); @@ -125,11 +127,11 @@ int __init zpci_msihash_init(void) { unsigned int i; - msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL); + msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL); if (!msi_hash) return -ENOMEM; - for (i = 0; i < (1U << msihash_shift); i++) + for (i = 0; i < MSI_HASH_BUCKETS; i++) INIT_HLIST_HEAD(&msi_hash[i]); return 0; } diff --git a/arch/score/Kconfig b/arch/score/Kconfig index e569aa1fd2ba..c8def8bc9020 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -12,7 +12,7 @@ config SCORE select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL select CLONE_BACKWARDS diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 289127d5241c..3d361f236308 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -84,12 +84,6 @@ config ARCH_DEFCONFIG default "arch/sparc/configs/sparc32_defconfig" if SPARC32 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 -# CONFIG_BITS can be used at source level to get 32/64 bits -config BITS - int - default 32 if SPARC32 - default 64 if SPARC64 - config IOMMU_HELPER bool default y if SPARC64 @@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM config GENERIC_HWEIGHT bool - default y if !ULTRA_HAS_POPULATION_COUNT + default y config GENERIC_CALIBRATE_DELAY bool diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430ce2fd..ff18e3cfb6b1 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -2,11 +2,16 @@ generic-y += clkdev.h +generic-y += cputime.h generic-y += div64.h +generic-y += emergency-restart.h generic-y += exec.h generic-y += local64.h +generic-y += mutex.h generic-y += irq_regs.h generic-y += local.h generic-y += module.h +generic-y += serial.h generic-y += trace_clock.h +generic-y += types.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b81e019..000000000000 --- a/arch/sparc/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_CPUTIME_H -#define __SPARC_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SPARC_CPUTIME_H */ diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sparc/include/asm/mutex.h 
b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90316b..7619f2f792af 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include <asm/tlbflush.h> #include <asm-generic/pgtable.h> /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c28059..000000000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC_SERIAL_H -#define __SPARC_SERIAL_H - -#define BASE_BAUD ( 1843200 / 16 ) - -#endif /* __SPARC_SERIAL_H */ diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c5f10a..3c8917f054de 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); void cpu_panic(void); -extern void smp4m_irq_rotate(int cpu); /* * General functions that each host system must provide. @@ -46,7 +45,6 @@ void sun4m_init_smp(void); void sun4d_init_smp(void); void smp_callin(void); -void smp_boot_cpus(void); void smp_store_cpu_info(int); void smp_resched_interrupt(void); @@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter - void smp_setup_cpu_possible_map(void); #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index d06a26601753..6b67e50fb9b4 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h @@ -45,6 +45,7 @@ #define SUN4V_CHIP_NIAGARA3 0x03 #define SUN4V_CHIP_NIAGARA4 0x04 #define SUN4V_CHIP_NIAGARA5 0x05 +#define SUN4V_CHIP_SPARC64X 0x8a #define SUN4V_CHIP_UNKNOWN 0xff #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f56fa03..c7de3323819c 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,8 +18,7 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. 
*/ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef463494153..f0d6a9700f4c 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. */ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175aff71b7..b5843ee09fb5 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -44,7 +44,6 @@ header-y += swab.h header-y += termbits.h header-y += termios.h header-y += traps.h -header-y += types.h header-y += uctx.h header-y += unistd.h header-y += utrap.h diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156cde9c..000000000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _SPARC_TYPES_H -#define _SPARC_TYPES_H -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. 
- */ - -#if defined(__sparc__) - -#include <asm-generic/int-ll64.h> - -#endif /* defined(__sparc__) */ - -#endif /* defined(_SPARC_TYPES_H) */ diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index a6c94a2bf9d4..5c5125895db8 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "niagara5"; break; + case SUN4V_CHIP_SPARC64X: + sparc_cpu_type = "SPARC64-X"; + sparc_fpu_type = "SPARC64-X integrated FPU"; + sparc_pmu_type = "sparc64-x"; + break; + default: printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 2feb15c35d9e..26b706a1867d 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -134,6 +134,8 @@ prom_niagara_prefix: .asciz "SUNW,UltraSPARC-T" prom_sparc_prefix: .asciz "SPARC-" +prom_sparc64x_prefix: + .asciz "SPARC64-X" .align 4 prom_root_compatible: .skip 64 @@ -412,7 +414,7 @@ sun4v_chip_type: cmp %g2, 'T' be,pt %xcc, 70f cmp %g2, 'M' - bne,pn %xcc, 4f + bne,pn %xcc, 49f nop 70: ldub [%g1 + 7], %g2 @@ -425,7 +427,7 @@ sun4v_chip_type: cmp %g2, '5' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA5, %g4 - ba,pt %xcc, 4f + ba,pt %xcc, 49f nop 91: sethi %hi(prom_cpu_compatible), %g1 @@ -439,6 +441,25 @@ sun4v_chip_type: mov SUN4V_CHIP_NIAGARA2, %g4 4: + /* Athena */ + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc64x_prefix), %g7 + or %g7, %lo(prom_sparc64x_prefix), %g7 + mov 9, %g3 +41: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 49f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 41b + add %g1, 1, %g1 + mov SUN4V_CHIP_SPARC64X, %g4 + ba,pt %xcc, 5f + nop + +49: mov SUN4V_CHIP_UNKNOWN, %g4 5: sethi %hi(sun4v_chip_type), %g2 or %g2, %lo(sun4v_chip_type), %g2 diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index fc4320886a3a..4d1487138d26 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -186,6 +186,8 @@ struct grpci2_cap_first { #define CAP9_IOMAP_OFS 0x20 #define CAP9_BARSIZE_OFS 0x24 +#define TGT 256 + struct grpci2_priv { struct leon_pci_info info; /* must be on top of this structure */ struct grpci2_regs *regs; @@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, if (where & 0x3) return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, if (where & 0x3) return -EINVAL; - if (bus == 0 && PCI_SLOT(devfn) != 0) - devfn += (0x8 * 6); + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } /* Select bus */ spin_lock_irqsave(&grpci2_dev_lock, flags); @@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, unsigned int busno = bus->number; int ret; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { + if (PCI_SLOT(devfn) > 15 || busno > 255) { *val = ~0; return 0; } @@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, struct grpci2_priv *priv 
= grpci2priv; unsigned int busno = bus->number; - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) + if (PCI_SLOT(devfn) > 15 || busno > 255) return 0; #ifdef GRPCI2_DEBUG_CFGACCESS @@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv) REGSTORE(regs->ahbmst_map[i], priv->pci_area); /* Get the GRPCI2 Host PCI ID */ - grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); /* Get address to first (always defined) capability structure */ - grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); + grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); /* Enable/Disable Byte twisting */ - grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); /* Setup the Host's PCI Target BARs for other peripherals to access, * and do DMA to the host's memory. The target BARs can be sized and @@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv) pciadr = 0; } } - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); - grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, + bar_sz); + grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", i, pciadr, ahbadr); } /* set as bus master and enable pci memory responses */ - grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); + grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); + grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); /* Enable Error respone (CPU-TRAP) on illegal memory access. 
*/ REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66abd06..ca64d2a86ec0 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; @@ -1074,23 +1074,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e72ce15..8ec4e9c0251a 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len) void bit_map_init(struct bit_map *t, unsigned long *map, int size) { - - if ((size & 07) != 0) - BUG(); - memset(map, 0, size>>3); - + bitmap_zero(map, size); memset(t, 0, sizeof *t); spin_lock_init(&t->lock); t->map = map; diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f7191fbba..28f96f27c768 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -34,7 +34,7 @@ #define IOMMU_RNGE IOMMU_RNGE_256MB #define IOMMU_START 0xF0000000 #define IOMMU_WINSIZE (256*1024*1024U) -#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ +#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ /* srmmu.c */ diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72e3e80..036c2797dece 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void) SRMMU_NOCACHE_ALIGN_MAX, 0UL); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); - srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); + srmmu_nocache_bitmap = + __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + SMP_CACHE_BYTES, 0UL); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); diff --git a/arch/sparc/mm/tlb.c 
b/arch/sparc/mm/tlb.c index ba6ae7ffdc2c..83d89bcb44af 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec) { @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, nr = 0; } + if (!tb->active) { + global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); + goto out; + } + if (nr == 0) tb->mm = mm; @@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b9becf..2cc3bce5ee91 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,11 +7,10 @@ #include <linux/preempt.h> #include <linux/slab.h> #include <asm/page.h> -#include <asm/tlbflush.h> -#include <asm/tlb.h> -#include <asm/mmu_context.h> #include <asm/pgtable.h> +#include <asm/mmu_context.h> #include <asm/tsb.h> +#include <asm/tlb.h> #include <asm/oplib.h> extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus 
|| tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index f8e13d421fcb..432aa0cb1b38 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ nop .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn %o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page 
+xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc %g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index ff496ab1e794..25877aebc685 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -17,7 +17,7 @@ config TILE select GENERIC_IRQ_SHOW select HAVE_DEBUG_BUGVERBOSE select HAVE_SYSCALL_WRAPPERS if TILEGX - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select SYS_HYPERVISOR select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CLOCKEVENTS diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 8c5eff6d6df5..47684815e5c8 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index e7a3dfcbcda7..dd2b8f0c631f 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -324,7 +324,6 
@@ CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m -CONFIG_MULTICORE_RAID456=y CONFIG_MD_FAULTY=m CONFIG_BLK_DEV_DM=m CONFIG_DM_DEBUG=y diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b12..c96f9bbb760d 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -40,7 +40,15 @@ #include <asm/percpu.h> #include <arch/spr_def.h> -/* Set and clear kernel interrupt masks. */ +/* + * Set and clear kernel interrupt masks. + * + * NOTE: __insn_mtspr() is a compiler builtin marked as a memory + * clobber. We rely on it being equivalent to a compiler barrier in + * this code since arch_local_irq_save() and friends must act as + * compiler barriers. This compiler semantic is baked into enough + * places that the compiler will maintain it going forward. + */ #if CHIP_HAS_SPLIT_INTR_MASK() #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 # error Fix assumptions about which word various interrupts are in diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD -/* - * Note that the kernel can potentially support other compression - * techniques than gz, though we don't do so by default. If we ever - * decide to do so we can either look for other filename extensions, - * or just allow a file with this name to be compressed with an - * arbitrary compressor (somewhat counterintuitively). - */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; +static char __initdata initramfs_file[128] = "initramfs"; static int __init setup_initramfs_file(char *str) { @@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for an "initramfs.cpio.gz" file in the hvfs. - * If there is one, we allocate some memory for it and it will be - * unpacked to the initramfs. + * We look for a file called "initramfs" in the hvfs. If there is one, we + * allocate some memory for it and it will be unpacked to the initramfs. + * If it's compressed, the initd code will uncompress it first. */ static void __init load_hv_initrd(void) { @@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) + if (set_initramfs_file) { pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; + return; + } else { + /* Try old backwards-compatible name. 
*/ + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); + if (fd == HV_ENOENT) + return; + } } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h index 78f1b8999964..c512b0306dd4 100644 --- a/arch/um/drivers/chan.h +++ b/arch/um/drivers/chan.h @@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf, extern int console_open_chan(struct line *line, struct console *co); extern void deactivate_chan(struct chan *chan, int irq); extern void reactivate_chan(struct chan *chan, int irq); -extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty); +extern void chan_enable_winch(struct chan *chan, struct tty_port *port); extern int enable_chan(struct line *line); extern void close_chan(struct line *line); extern int chan_window_size(struct line *line, diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 15c553c239a1..80b47cb71e0a 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans) return err; } -void chan_enable_winch(struct chan *chan, struct tty_struct *tty) +void chan_enable_winch(struct chan *chan, struct tty_port *port) { if (chan && chan->primary && chan->ops->winch) - register_winch(chan->fd, tty); + register_winch(chan->fd, port); } static void line_timer_cb(struct work_struct *work) diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c index 9be670ad23b5..3fd7c3efdb18 100644 --- a/arch/um/drivers/chan_user.c +++ b/arch/um/drivers/chan_user.c @@ -216,7 +216,7 @@ static int winch_thread(void *arg) } } -static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, +static int winch_tramp(int fd, struct tty_port *port, int *fd_out, unsigned long *stack_out) { struct winch_data data; @@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, return err; } -void register_winch(int fd, struct tty_struct *tty) +void register_winch(int fd, struct tty_port *port) { unsigned long stack; int pid, thread, count, thread_fd = -1; @@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty) return; pid = tcgetpgrp(fd); - if (is_skas_winch(pid, fd, tty)) { - register_winch_irq(-1, fd, -1, tty, 0); + if (is_skas_winch(pid, fd, port)) { + register_winch_irq(-1, fd, -1, port, 0); return; } if (pid == -1) { - thread = winch_tramp(fd, tty, &thread_fd, &stack); + thread = winch_tramp(fd, port, &thread_fd, &stack); if (thread < 0) return; - register_winch_irq(thread_fd, fd, thread, tty, stack); + register_winch_irq(thread_fd, fd, thread, port, stack); count = write(thread_fd, &c, sizeof(c)); if (count != sizeof(c)) diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h index dc693298eb8f..03f1b565c5f9 100644 --- a/arch/um/drivers/chan_user.h +++ b/arch/um/drivers/chan_user.h @@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out, unsigned short *cols_out); extern void generic_free(void *data); -struct tty_struct; -extern void register_winch(int fd, struct tty_struct *tty); +struct tty_port; +extern void register_winch(int fd, struct tty_port *port); extern void register_winch_irq(int fd, int tty_fd, int pid, - struct tty_struct *tty, unsigned long stack); + struct tty_port *port, unsigned long stack); #define __channel_help(fn, prefix) \ __uml_help(fn, prefix "[0-9]*=<channel description>\n" \ diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index f1b38571f94e..be541cf69fd2 
100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -305,7 +305,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty) return ret; if (!line->sigio) { - chan_enable_winch(line->chan_out, tty); + chan_enable_winch(line->chan_out, port); line->sigio = 1; } @@ -315,8 +315,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty) return 0; } +static void unregister_winch(struct tty_struct *tty); + +static void line_destruct(struct tty_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(port); + struct line *line = tty->driver_data; + + if (line->sigio) { + unregister_winch(tty); + line->sigio = 0; + } +} + static const struct tty_port_operations line_port_ops = { .activate = line_activate, + .destruct = line_destruct, }; int line_open(struct tty_struct *tty, struct file *filp) @@ -340,18 +354,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty, return 0; } -static void unregister_winch(struct tty_struct *tty); - -void line_cleanup(struct tty_struct *tty) -{ - struct line *line = tty->driver_data; - - if (line->sigio) { - unregister_winch(tty); - line->sigio = 0; - } -} - void line_close(struct tty_struct *tty, struct file * filp) { struct line *line = tty->driver_data; @@ -601,7 +603,7 @@ struct winch { int fd; int tty_fd; int pid; - struct tty_struct *tty; + struct tty_port *port; unsigned long stack; struct work_struct work; }; @@ -655,7 +657,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) goto out; } } - tty = winch->tty; + tty = tty_port_tty_get(winch->port); if (tty != NULL) { line = tty->driver_data; if (line != NULL) { @@ -663,6 +665,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) &tty->winsize.ws_col); kill_pgrp(tty->pgrp, SIGWINCH, 1); } + tty_kref_put(tty); } out: if (winch->fd != -1) @@ -670,7 +673,7 @@ static irqreturn_t winch_interrupt(int irq, void *data) return IRQ_HANDLED; } -void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, +void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port, unsigned long stack) { struct winch *winch; @@ -685,7 +688,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, .fd = fd, .tty_fd = tty_fd, .pid = pid, - .tty = tty, + .port = port, .stack = stack }); if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, @@ -714,15 +717,18 @@ static void unregister_winch(struct tty_struct *tty) { struct list_head *ele, *next; struct winch *winch; + struct tty_struct *wtty; spin_lock(&winch_handler_lock); list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); - if (winch->tty == tty) { + wtty = tty_port_tty_get(winch->port); + if (wtty == tty) { free_winch(winch); break; } + tty_kref_put(wtty); } spin_unlock(&winch_handler_lock); } diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index d8926c303629..39f186252e02 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); len = (*lp->write)(lp->fd, skb, lp); + skb_tx_timestamp(skb); if (len == skb->len) { dev->stats.tx_packets++; @@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev, static const struct ethtool_ops uml_net_ethtool_ops = { .get_drvinfo = uml_net_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static void uml_net_user_timer_expire(unsigned long _conn) 
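The arch/um hunks above (chan.h, chan_kern.c, chan_user.c, line.c) rework the console window-size-change ("winch") plumbing to hold a long-lived struct tty_port instead of a raw struct tty_struct pointer; the interrupt path now looks the tty up with tty_port_tty_get() and releases it with tty_kref_put(). A minimal, hypothetical sketch of that lookup/release pattern follows; the function name and the direct winsize update are invented for illustration and are not taken from the patch.

#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/sched.h>

/*
 * Illustrative only: notify the foreground process group of a resize,
 * given only the long-lived struct tty_port. The tty itself may be
 * gone, so take a temporary reference and drop it when done.
 */
static void example_notify_resize(struct tty_port *port,
				  unsigned short rows, unsigned short cols)
{
	struct tty_struct *tty = tty_port_tty_get(port);

	if (!tty)
		return;			/* no tty currently attached */

	tty->winsize.ws_row = rows;
	tty->winsize.ws_col = cols;
	if (tty->pgrp)
		kill_pgrp(tty->pgrp, SIGWINCH, 1);

	tty_kref_put(tty);		/* balance tty_port_tty_get() */
}

In the driver itself the size comes from chan_window_size(); the point of the sketch is only the tty_port_tty_get()/tty_kref_put() pairing and the mandatory NULL check, since a port can outlive any particular tty.
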
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index 16fdd0a0f9d6..b8d14fa52059 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c @@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = { .throttle = line_throttle, .unthrottle = line_unthrottle, .install = ssl_install, - .cleanup = line_cleanup, .hangup = line_hangup, }; diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index 827777af3f6d..7b361f36ca96 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c @@ -110,7 +110,6 @@ static const struct tty_operations console_ops = { .set_termios = line_set_termios, .throttle = line_throttle, .unthrottle = line_unthrottle, - .cleanup = line_cleanup, .hangup = line_hangup, }; diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index b1469fe93295..9d9f1b4bf826 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -15,7 +15,7 @@ #include <sysdep/mcontext.h> #include "internal.h" -void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { +void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index da4b9e9999fd..337518c5042a 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -15,6 +15,8 @@ #include <sys/mman.h> #include <sys/stat.h> #include <sys/wait.h> +#include <sys/time.h> +#include <sys/resource.h> #include <asm/unistd.h> #include <init.h> #include <os.h> diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index dc50b157fc83..2943e3acdf0c 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -9,7 +9,7 @@ config UNICORE32 select GENERIC_ATOMIC64 select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select ARCH_HAVE_CUSTOM_GPIO_H select GENERIC_FIND_FIRST_BIT select GENERIC_IRQ_PROBE diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a4f24f5b1218..15b5cef4aa38 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -112,7 +112,7 @@ config X86 select GENERIC_STRNLEN_USER select HAVE_CONTEXT_TRACKING if X86_64 select HAVE_IRQ_TIME_ACCOUNTING - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select MODULES_USE_ELF_REL if X86_32 select MODULES_USE_ELF_RELA if X86_64 select CLONE_BACKWARDS if X86_32 @@ -1549,6 +1549,7 @@ config X86_SMAP config EFI bool "EFI runtime service support" depends on ACPI + select UCS2_STRING ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). 
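The ssl.c and stdio_console.c hunks above drop the per-tty .cleanup hook because teardown has moved to the tty_port's .destruct callback (line_destruct, added in line.c further up). A hedged sketch of that shape, using invented names rather than the UML driver's exact structure:

#include <linux/kernel.h>
#include <linux/tty.h>

/* Hypothetical per-line driver state embedding the port. */
struct example_line {
	struct tty_port	port;
	int		sigio_enabled;
};

/*
 * Called when the last reference to the tty_port is dropped, instead
 * of from tty_operations->cleanup(), so it runs exactly once per port
 * no matter how the individual tty went away.
 */
static void example_destruct(struct tty_port *port)
{
	struct example_line *line =
		container_of(port, struct example_line, port);

	if (line->sigio_enabled) {
		/* driver-specific teardown would go here */
		line->sigio_enabled = 0;
	}
}

static const struct tty_port_operations example_port_ops = {
	.destruct	= example_destruct,
};

The design point is lifetime: tty_operations->cleanup() is tied to a particular tty_struct, while tty_port_operations->destruct() is tied to the port's own refcount, which is what the winch bookkeeping now follows.
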
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone -$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += vmlinux.bin.all vmlinux.relocs +targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035a6b96..35ee62fccf98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) *size = len; } +static efi_status_t setup_efi_vars(struct boot_params *params) +{ + struct setup_data *data; + struct efi_var_bootdata *efidata; + u64 store_size, remaining_size, var_size; + efi_status_t status; + + if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) + return EFI_UNSUPPORTED; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, &store_size, + &remaining_size, &var_size); + + if (status != EFI_SUCCESS) + return status; + + status = efi_call_phys3(sys_table->boottime->allocate_pool, + EFI_LOADER_DATA, sizeof(*efidata), &efidata); + + if (status != EFI_SUCCESS) + return status; + + efidata->data.type = SETUP_EFI_VARS; + efidata->data.len = sizeof(struct efi_var_bootdata) - + sizeof(struct setup_data); + efidata->data.next = 0; + efidata->store_size = store_size; + efidata->remaining_size = remaining_size; + efidata->max_var_size = var_size; + + if (data) + data->next = (unsigned long)efidata; + else + params->hdr.setup_data = (unsigned long)efidata; + +} + static efi_status_t setup_efi_pci(struct boot_params *params) { efi_pci_io_protocol *pci; @@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, setup_graphics(boot_params); + setup_efi_vars(boot_params); + setup_efi_pci(boot_params); status = efi_call_phys3(sys_table->boottime->allocate_pool, diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f30c727..2fb5d5884e23 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long 
size); +struct efi_var_bootdata { + struct setup_data data; + u64 store_size; + u64 remaining_size; + u64 max_var_size; +}; + #ifdef CONFIG_EFI static inline bool efi_is_native(void) diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d3ddd17405d0..5a6d2873f80e 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -77,6 +77,7 @@ struct arch_specific_insn { * a post_handler or break_handler). */ int boostable; + bool if_modifier; }; struct arch_optimized_insn { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 635a74d22409..4979778cc7fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -414,8 +414,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - unsigned int time_offset; - struct page *time_page; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; /* set guest stopped flag in pvclock flags field */ bool pvclock_set_guest_stopped_request; diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd1742cfd0..7361e47db79f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236ed83af..b3b0ec1dac86 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,6 +91,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. 
*/ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax & __SYSCALL_MASK; + return regs->orig_ax; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax & __SYSCALL_MASK; + regs->ax = regs->orig_ax; } static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef20773b8f..c7797307fc2b 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@ #define tlb_flush(tlb) \ { \ - if (tlb->fullmm == 0) \ + if (!tlb->fullmm && !tlb->need_flush_all) \ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ else \ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index c20d1ce62dc6..e709884d0ef9 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } -extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); +extern int __must_check xen_physdev_op_compat(int, void *); static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); if (unlikely(rc == -ENOSYS)) - rc = HYPERVISOR_physdev_op_compat(cmd, arg); + rc = xen_physdev_op_compat(cmd, arg); return rc; } diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..08744242b8d2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -6,6 +6,7 @@ #define SETUP_E820_EXT 1 #define SETUP_DTB 2 #define SETUP_PCI 3 +#define SETUP_EFI_VARS 4 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 892ce40a7470..7a060f4b411f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -44,6 +44,7 @@ #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) +#define MSR_PLATFORM_INFO 0x000000ce #define MSR_MTRRcap 0x000000fe #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d83fb70..8f4be53ea04b 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void) if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; - /* - * Xen emulates Hyper-V to support enlightened Windows. - * Check to see first if we are on a Xen Hypervisor. 
- */ - if (xen_cpuid_base()) - return false; - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); @@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void) if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); -#if IS_ENABLED(CONFIG_HYPERV) - /* - * Setup the IDT for hypervisor callback. - */ - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); -#endif } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { @@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr; void hv_register_vmbus_handler(int irq, irq_handler_t handler) { + /* + * Setup the IDT for hypervisor callback. + */ + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); + vmbus_irq = irq; vmbus_isr = handler; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 529c8931fc02..cc45deb791b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ + INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ + INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ @@ -149,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), + EVENT_EXTRA_END +}; + +static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), EVENT_EXTRA_END }; @@ -2093,7 +2103,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 45) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; @@ -2119,7 +2132,10 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_ivb_event_constraints; x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - x86_pmu.extra_regs = intel_snb_extra_regs; + if (boot_cpu_data.x86_model == 62) + x86_pmu.extra_regs = intel_snbep_extra_regs; + else + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu 
when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 826054a4f2ee..26830f3af0df 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void) if (top <= at) return 0; + memset(&regs, 0, sizeof(regs)); + ds->bts_index = ds->bts_buffer_base; perf_sample_data_init(&data, 0, event->hw.last_period); - regs.ip = 0; /* * Prepare a generic sample, i.e. fill in the invariant fields. @@ -729,3 +730,13 @@ void intel_ds_init(void) } } } + +void perf_restore_debug_store(void) +{ + struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); + + if (!x86_pmu.bts && !x86_pmu.pebs) + return; + + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); +} diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3f06e6149981..7bfe318d3d8a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) else p->ainsn.boostable = -1; + /* Check whether the instruction modifies Interrupt Flag or not */ + p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); + /* Also, displacement change doesn't affect the first byte */ p->opcode = p->ainsn.insn[0]; } @@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); - if (is_IF_modifier(p->ainsn.insn)) + if (p->ainsn.if_modifier) kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; } diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db8417d15..833d51d6ee06 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c @@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void) u32 eax = 0x00000000; u32 ebx, ecx = 0, edx; - if (!have_cpuid_p()) - return X86_VENDOR_UNKNOWN; - native_cpuid(&eax, &ebx, &ecx, &edx); if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) @@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void) return X86_VENDOR_UNKNOWN; } +static int __cpuinit x86_family(void) +{ + u32 eax = 0x00000001; + u32 ebx, ecx = 0, edx; + int x86; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + x86 = (eax >> 8) & 0xf; + if (x86 == 15) + x86 += (eax >> 20) & 0xff; + + return x86; +} + void __init load_ucode_bsp(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; - if (vendor == X86_VENDOR_INTEL) + vendor = x86_vendor(); + x86 = x86_family(); + + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_bsp(); } void __cpuinit load_ucode_ap(void) { - int vendor = x86_vendor(); + int vendor, x86; + + if (!have_cpuid_p()) + return; + + vendor = x86_vendor(); + x86 = x86_family(); - if (vendor == X86_VENDOR_INTEL) + if (vendor == X86_VENDOR_INTEL && x86 >= 6) load_ucode_intel_ap(); } diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 7890bc838952..d893e8ed8ac9 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c @@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, struct microcode_intel ***mc_saved; mc_saved = (struct microcode_intel ***) - __pa_symbol(&mc_saved_data->mc_saved); +
__pa_nodebug(&mc_saved_data->mc_saved); for (i = 0; i < mc_saved_data->mc_saved_count; i++) { struct microcode_intel *p; p = *(struct microcode_intel **) - __pa(mc_saved_data->mc_saved + i); - mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); + __pa_nodebug(mc_saved_data->mc_saved + i); + mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); } } #endif @@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end, struct cpio_data cd; long offset = 0; #ifdef CONFIG_X86_32 - char *p = (char *)__pa_symbol(ucode_name); + char *p = (char *)__pa_nodebug(ucode_name); #else char *p = ucode_name; #endif @@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) if (mc_intel == NULL) return; - delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); - current_mc_date_p = (int *)__pa_symbol(&current_mc_date); + delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); + current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); *delay_ucode_info_p = 1; *current_mc_date_p = mc_intel->hdr.date; @@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) } #endif -static int apply_microcode_early(struct mc_saved_data *mc_saved_data, - struct ucode_cpu_info *uci) +static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, + struct ucode_cpu_info *uci) { struct microcode_intel *mc_intel; unsigned int val[2]; @@ -741,15 +741,15 @@ load_ucode_intel_bsp(void) #ifdef CONFIG_X86_32 struct boot_params *boot_params_p; - boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); + boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); ramdisk_image = boot_params_p->hdr.ramdisk_image; ramdisk_size = boot_params_p->hdr.ramdisk_size; initrd_start_early = ramdisk_image; initrd_end_early = initrd_start_early + ramdisk_size; _load_ucode_intel_bsp( - (struct mc_saved_data *)__pa_symbol(&mc_saved_data), - (unsigned long *)__pa_symbol(&mc_saved_in_initrd), + (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), + (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), initrd_start_early, initrd_end_early, &uci); #else ramdisk_image = boot_params.hdr.ramdisk_image; @@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void) unsigned long *initrd_start_p; mc_saved_in_initrd_p = - (unsigned long *)__pa_symbol(mc_saved_in_initrd); - mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); - initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); - initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); + (unsigned long *)__pa_nodebug(mc_saved_in_initrd); + mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); + initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); + initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p); #else mc_saved_data_p = &mc_saved_data; mc_saved_in_initrd_p = mc_saved_in_initrd; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18a1031..8bfb335f74bb 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return
this_cpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 90d8cc930f5e..fae9134a2de9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void) /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. + * On 64bit, old kexec-tools need to under 896MiB. */ #ifdef CONFIG_X86_32 -# define CRASH_KERNEL_ADDR_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) +# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) #else -# define CRASH_KERNEL_ADDR_MAX MAXMEM +# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) +# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM #endif static void __init reserve_crashkernel_low(void) @@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void) unsigned long long low_base = 0, low_size = 0; unsigned long total_low_mem; unsigned long long base; + bool auto_set = false; int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); + /* crashkernel=Y,low */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); - if (ret != 0 || low_size <= 0) - return; + if (ret != 0) { + /* + * two parts from lib/swiotlb.c: + * swiotlb size: user specified with swiotlb= or default. + * swiotlb overflow buffer: now is hardcoded to 32k. + * We round it to 8M for other buffers that + * may need to stay low too. + */ + low_size = swiotlb_size_or_default() + (8UL<<20); + auto_set = true; + } else { + /* passed with crashkernel=0,low ? */ + if (!low_size) + return; + } low_base = memblock_find_in_range(low_size, (1ULL<<32), low_size, alignment); if (!low_base) { - pr_info("crashkernel low reservation failed - No suitable area found.\n"); + if (!auto_set) + pr_info("crashkernel low reservation failed - No suitable area found.\n"); return; } @@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void) const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long total_mem; unsigned long long crash_size, crash_base; + bool high = false; int ret; total_mem = memblock_phys_mem_size(); + /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); - if (ret != 0 || crash_size <= 0) - return; + if (ret != 0 || crash_size <= 0) { + /* crashkernel=X,high */ + ret = parse_crashkernel_high(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret != 0 || crash_size <= 0) + return; + high = true; + } /* 0 means: find the address automatically */ if (crash_base <= 0) { @@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void) * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, - CRASH_KERNEL_ADDR_MAX, crash_size, alignment); + high ? 
CRASH_KERNEL_ADDR_HIGH_MAX : + CRASH_KERNEL_ADDR_LOW_MAX, + crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr); + addr, sizeof(u8)); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f71500af1f81..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; - void *shared_kaddr; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; - struct pvclock_vcpu_time_info *guest_hv_clock; + struct pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; - /* Keep irq disabled to prevent changes to the clock */ - local_irq_save(flags); - this_tsc_khz = __get_cpu_var(cpu_tsc_khz); - if (unlikely(this_tsc_khz == 0)) { - local_irq_restore(flags); - kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); - return 1; - } - /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. @@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); + + /* Keep irq disabled to prevent changes to the clock */ + local_irq_save(flags); + this_tsc_khz = __get_cpu_var(cpu_tsc_khz); + if (unlikely(this_tsc_khz == 0)) { + local_irq_restore(flags); + kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); + return 1; + } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); @@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_restore(flags); - if (!vcpu->time_page) + if (!vcpu->pv_time_enabled) return 0; /* @@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ vcpu->hv_clock.version += 2; - shared_kaddr = kmap_atomic(vcpu->time_page); - - guest_hv_clock = shared_kaddr + vcpu->time_offset; + if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, + &guest_hv_clock, sizeof(guest_hv_clock)))) + return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ - pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); + pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; @@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.flags = pvclock_flags; - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); - - kunmap_atomic(shared_kaddr); - - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock)); return 0; } @@ -1827,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1837,10 +1834,7 @@ static int kvm_pv_enable_async_pf(struct 
kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - if (vcpu->arch.time_page) { - kvm_release_page_dirty(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) @@ -1947,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { + u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1956,14 +1951,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!(data & 1)) break; - /* ...but clean it before doing the actual write */ - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); - - vcpu->arch.time_page = - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + gpa_offset = data & ~(PAGE_MASK | 1); - if (is_error_page(vcpu->arch.time_page)) - vcpu->arch.time_page = NULL; + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) + vcpu->arch.pv_time_enabled = false; + else + vcpu->arch.pv_time_enabled = true; break; } @@ -1980,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS)) + data & KVM_STEAL_VALID_BITS, + sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; @@ -2967,7 +2963,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { - if (!vcpu->arch.time_page) + if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -6718,6 +6714,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; + vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89ca5569..7114c63f047d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1334,6 +1334,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 05928aae911e..906fea315791 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) char c; unsigned zero_len; - for (; len; --len) { + for (; len; --len, to++) { if (__get_user_nocheck(c, from++, sizeof(char))) break; - if (__put_user_nocheck(c, to++, sizeof(char))) + if (__put_user_nocheck(c, to, sizeof(char))) break; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525246d4..0e883364abb5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables diff --git 
a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b0086567271c..0e38951e65eb 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -68,7 +68,7 @@ static int print_split(struct split_state *s) s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { - if (!(pte_val(*pte) & _PAGE_PSE)) { + if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e1d0d9..fb4e73ec24d8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * We are safe now. Check whether the new pgprot is the same: */ old_pte = *kpte; - old_prot = new_prot = req_prot = pte_pgprot(old_pte); + old_prot = req_prot = pte_pgprot(old_pte); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); @@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL * for the ancient hardware that doesn't support it. */ - if (pgprot_val(new_prot) & _PAGE_PRESENT) - pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; + if (pgprot_val(req_prot) & _PAGE_PRESENT) + pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; else - pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); + pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); - new_prot = canon_pgprot(new_prot); + req_prot = canon_pgprot(req_prot); /* * old_pte points to the large page base address. So we need @@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) * but that can deadlock->flush only current cpu: */ __flush_tlb_all(); + + arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_HIBERNATION diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b51f90..17fda6a8b3c2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); + /* + * NOTE! For PAE, any changes to the top page-directory-pointer-table + * entries need a full cr3 reload to flush. + */ +#ifdef CONFIG_X86_PAE + tlb->need_flush_all = 1; +#endif tlb_remove_page(tlb, virt_to_page(pmd)); } diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 94e76620460f..4a9be6ddf054 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) goto error; i = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], (type == PCI_CAP_ID_MSIX) ? "pcifront-msi-x" : "pcifront-msi", @@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) dev_dbg(&dev->dev, "xen: msi already bound to pirq=%d\n", pirq); } - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi", DOMID_SELF); @@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) } ret = xen_bind_pirq_msi_to_irq(dev, msidesc, - map_irq.pirq, map_irq.index, + map_irq.pirq, (type == PCI_CAP_ID_MSIX) ? 
"msi-x" : "msi", domid); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 5f2ecaf3f9d8..e4a86a677ce1 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -41,6 +41,7 @@ #include <linux/io.h> #include <linux/reboot.h> #include <linux/bcd.h> +#include <linux/ucs2_string.h> #include <asm/setup.h> #include <asm/efi.h> @@ -51,6 +52,13 @@ #define EFI_DEBUG 1 +/* + * There's some additional metadata associated with each + * variable. Intel's reference implementation is 60 bytes - bump that + * to account for potential alignment constraints + */ +#define VAR_METADATA_SIZE 64 + struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .acpi = EFI_INVALID_TABLE_ADDR, @@ -69,6 +77,13 @@ struct efi_memory_map memmap; static struct efi efi_phys __initdata; static efi_system_table_t efi_systab __initdata; +static u64 efi_var_store_size; +static u64 efi_var_remaining_size; +static u64 efi_var_max_var_size; +static u64 boot_used_size; +static u64 boot_var_size; +static u64 active_size; + unsigned long x86_efi_facility; /* @@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg) } early_param("add_efi_memmap", setup_add_efi_memmap); +static bool efi_no_storage_paranoia; + +static int __init setup_storage_paranoia(char *arg) +{ + efi_no_storage_paranoia = true; + return 0; +} +early_param("efi_no_storage_paranoia", setup_storage_paranoia); + static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { @@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt3(get_next_variable, - name_size, name, vendor); + efi_status_t status; + static bool finished = false; + static u64 var_size; + + status = efi_call_virt3(get_next_variable, + name_size, name, vendor); + + if (status == EFI_NOT_FOUND) { + finished = true; + if (var_size < boot_used_size) { + boot_var_size = boot_used_size - var_size; + active_size += boot_var_size; + } else { + printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); + } + } + + if (boot_used_size && !finished) { + unsigned long size; + u32 attr; + efi_status_t s; + void *tmp; + + s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); + + if (s != EFI_BUFFER_TOO_SMALL || !size) + return status; + + tmp = kmalloc(size, GFP_ATOMIC); + + if (!tmp) + return status; + + s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); + + if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { + var_size += size; + var_size += ucs2_strsize(name, 1024); + active_size += size; + active_size += VAR_METADATA_SIZE; + active_size += ucs2_strsize(name, 1024); + } + + kfree(tmp); + } + + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt5(set_variable, - name, vendor, attr, - data_size, data); + efi_status_t status; + u32 orig_attr = 0; + unsigned long orig_size = 0; + + status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, + NULL); + + if (status != EFI_BUFFER_TOO_SMALL) + orig_size = 0; + + status = efi_call_virt5(set_variable, + name, vendor, attr, + data_size, data); + + if (status == EFI_SUCCESS) { + if (orig_size) { + active_size -= orig_size; + active_size -= ucs2_strsize(name, 1024); + active_size -= VAR_METADATA_SIZE; + } + if (data_size) { + active_size += data_size; + active_size += ucs2_strsize(name, 
1024); + active_size += VAR_METADATA_SIZE; + } + } + + return status; } static efi_status_t virt_efi_query_variable_info(u32 attr, @@ -682,6 +776,9 @@ void __init efi_init(void) char vendor[100] = "unknown"; int i = 0; void *tmp; + struct setup_data *data; + struct efi_var_bootdata *efi_var_data; + u64 pa_data; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -699,6 +796,22 @@ void __init efi_init(void) if (efi_systab_init(efi_phys.systab)) return; + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = early_ioremap(pa_data, sizeof(*efi_var_data)); + if (data->type == SETUP_EFI_VARS) { + efi_var_data = (struct efi_var_bootdata *)data; + + efi_var_store_size = efi_var_data->store_size; + efi_var_remaining_size = efi_var_data->remaining_size; + efi_var_max_var_size = efi_var_data->max_var_size; + } + pa_data = data->next; + early_iounmap(data, sizeof(*efi_var_data)); + } + + boot_used_size = efi_var_store_size - efi_var_remaining_size; + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); /* @@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr) } return 0; } + +/* + * Some firmware has serious problems when using more than 50% of the EFI + * variable store, i.e. it triggers bugs that can brick machines. Ensure that + * we never use more than this safe limit. + * + * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable + * store. + */ +efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ + efi_status_t status; + u64 storage_size, remaining_size, max_size; + + status = efi.query_variable_info(attributes, &storage_size, + &remaining_size, &max_size); + if (status != EFI_SUCCESS) + return status; + + if (!max_size && remaining_size > size) + printk_once(KERN_ERR FW_BUG "Broken EFI implementation" + " is returning MaxVariableSize=0\n"); + /* + * Some firmware implementations refuse to boot if there's insufficient + * space in the variable store. We account for that by refusing the + * write if permitting it would reduce the available space to under + * 50%. However, some firmware won't reclaim variable space until + * after the used (not merely the actively used) space drops below + * a threshold. We can approximate that case with the value calculated + * above. If both the firmware and our calculations indicate that the + * available space would drop below 50%, refuse the write. 
+ */ + + if (!storage_size || size > remaining_size || + (max_size && size > max_size)) + return EFI_OUT_OF_RESOURCES; + + if (!efi_no_storage_paranoia && + ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && + (remaining_size - size < storage_size / 2))) + return EFI_OUT_OF_RESOURCES; + + return EFI_SUCCESS; +} +EXPORT_SYMBOL_GPL(efi_query_variable_store); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 120cee1c3f8d..3c68768d7a75 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -11,6 +11,7 @@ #include <linux/suspend.h> #include <linux/export.h> #include <linux/smp.h> +#include <linux/perf_event.h> #include <asm/pgtable.h> #include <asm/proto.h> @@ -228,6 +229,7 @@ static void __restore_processor_state(struct saved_context *ctxt) do_fpu_end(); x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); + perf_restore_debug_store(); } /* Needed by apm.c */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c8e1c7b95c3b..ddbd54a9b845 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -31,6 +31,7 @@ #include <linux/pci.h> #include <linux/gfp.h> #include <linux/memblock.h> +#include <linux/edd.h> #include <xen/xen.h> #include <xen/events.h> @@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = { .emergency_restart = xen_emergency_restart, }; +static void __init xen_boot_params_init_edd(void) +{ +#if IS_ENABLED(CONFIG_EDD) + struct xen_platform_op op; + struct edd_info *edd_info; + u32 *mbr_signature; + unsigned nr; + int ret; + + edd_info = boot_params.eddbuf; + mbr_signature = boot_params.edd_mbr_sig_buffer; + + op.cmd = XENPF_firmware_info; + + op.u.firmware_info.type = XEN_FW_DISK_INFO; + for (nr = 0; nr < EDDMAXNR; nr++) { + struct edd_info *info = edd_info + nr; + + op.u.firmware_info.index = nr; + info->params.length = sizeof(info->params); + set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params, + &info->params); + ret = HYPERVISOR_dom0_op(&op); + if (ret) + break; + +#define C(x) info->x = op.u.firmware_info.u.disk_info.x + C(device); + C(version); + C(interface_support); + C(legacy_max_cylinder); + C(legacy_max_head); + C(legacy_sectors_per_track); +#undef C + } + boot_params.eddbuf_entries = nr; + + op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE; + for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) { + op.u.firmware_info.index = nr; + ret = HYPERVISOR_dom0_op(&op); + if (ret) + break; + mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature; + } + boot_params.edd_mbr_sig_buf_entries = nr; +#endif +} + /* * Set up the GDT and segment registers for -fstack-protector. Until * we do this, we have to be careful not to call any stack-protected @@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void) /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; + + xen_boot_params_init_edd(); } #ifdef CONFIG_PCI /* PCI BIOS service won't work from a PV guest. 
*/ @@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: xen_vcpu_setup(cpu); - if (xen_have_vector_callback) + if (xen_have_vector_callback) { xen_init_lock_cpu(cpu); + if (xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_timer(cpu); + } break; default: break; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8e34938c57d..e006c18d288a 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3) __xen_write_cr3(true, cr3); xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ - - pv_mmu_ops.write_cr3 = &xen_write_cr3; } #endif @@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr) } /* Set the page permissions on an identity-mapped pages */ -static void set_page_prot(void *addr, pgprot_t prot) +static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) { unsigned long pfn = __pa(addr) >> PAGE_SHIFT; pte_t pte = pfn_pte(pfn, prot); - if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) + if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) BUG(); } +static void set_page_prot(void *addr, pgprot_t prot) +{ + return set_page_prot_flags(addr, prot, UVMF_NONE); +} #ifdef CONFIG_X86_32 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) { @@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, unsigned long addr) { if (*pt_base == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_base)++; } if (*pt_end == PFN_DOWN(__pa(addr))) { - set_page_prot((void *)addr, PAGE_KERNEL); + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); clear_page((void *)addr); (*pt_end)--; } @@ -2122,6 +2124,7 @@ static void __init xen_post_allocator_init(void) #endif #ifdef CONFIG_X86_64 + pv_mmu_ops.write_cr3 = &xen_write_cr3; SetPagePinned(virt_to_page(level3_user_vsyscall)); #endif xen_mark_init_mm_pinned(); @@ -2197,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, + .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 09ea61d2e02f..0d466d7c7175 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu) goto fail; per_cpu(xen_callfuncsingle_irq, cpu) = rc; + /* + * The IRQ worker on PVHVM goes through the native path and uses the + * IPI mechanism. 
+ */ + if (xen_hvm_domain()) + return 0; + callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, @@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu) if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); + if (xen_hvm_domain()) + return rc; + if (per_cpu(xen_irq_work, cpu) >= 0) unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); @@ -418,7 +428,7 @@ static int xen_cpu_disable(void) static void xen_cpu_die(unsigned int cpu) { - while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { + while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ/10); } @@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu) unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); + if (!xen_hvm_domain()) + unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); xen_uninit_lock_cpu(cpu); xen_teardown_timer(cpu); } @@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) static void xen_hvm_cpu_die(unsigned int cpu) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); - unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); + xen_cpu_die(cpu); native_cpu_die(cpu); } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index f7a080ef0354..8b54603ce816 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu) int irq; const char *name; + WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n", + cpu, per_cpu(lock_kicker_irq, cpu)); + + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, @@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu) void xen_uninit_lock_cpu(int cpu) { + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); + per_cpu(lock_kicker_irq, cpu) = -1; } void __init xen_init_spinlocks(void) { + /* + * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 + * (xen: disable PV spinlocks on HVM) + */ + if (xen_hvm_domain()) + return; + BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); pv_lock_ops.spin_is_locked = xen_spin_is_locked; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 0296a9522501..3d88bfdf9e1c 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = { static const struct clock_event_device *xen_clockevent = &xen_timerop_clockevent; -static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events); +static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 }; static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) { @@ -401,6 +401,9 @@ 
void xen_setup_timer(int cpu) struct clock_event_device *evt; int irq; + evt = &per_cpu(xen_clock_events, cpu); + WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); + printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); name = kasprintf(GFP_KERNEL, "timer%d", cpu); @@ -413,7 +416,6 @@ void xen_setup_timer(int cpu) IRQF_FORCE_RESUME, name, NULL); - evt = &per_cpu(xen_clock_events, cpu); memcpy(evt, xen_clockevent, sizeof(*evt)); evt->cpumask = cpumask_of(cpu); @@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu) BUG_ON(cpu == 0); evt = &per_cpu(xen_clock_events, cpu); unbind_from_irqhandler(evt->irq, NULL); + evt->irq = -1; } void xen_setup_cpu_clockevents(void) @@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void) { int cpu = smp_processor_id(); xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); + /* + * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence + * doing it xen_hvm_cpu_notify (which gets called by smp_init during + * early bootup and also during CPU hotplug events). + */ xen_setup_cpu_clockevents(); } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 35876ffac11d..b09de49dbec5 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -9,7 +9,7 @@ config XTENSA select HAVE_IDE select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS - select HAVE_VIRT_TO_BUS + select VIRT_TO_BUS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA diff --git a/block/blk-core.c b/block/blk-core.c index 074b758efc42..7c288358a745 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -39,6 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); DEFINE_IDA(blk_queue_ida); diff --git a/block/blk-flush.c b/block/blk-flush.c index db8f1b507857..cc2b827a853c 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, * copied from blk_rq_pos(rq). 
*/ if (error_sector) - *error_sector = bio->bi_sector; + *error_sector = bio->bi_sector; if (!bio_flagged(bio, BIO_UPTODATE)) ret = -EIO; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6206a934eb8c..5efc5a647183 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ + if (ret < 0) \ + return ret; \ if (neg) \ val = !val; \ \ diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index ef5356cd280a..0262210cad38 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, else if (len < ds) msg->msg_flags |= MSG_TRUNC; + msg->msg_namelen = 0; + lock_sock(sk); if (ctx->more) { ctx->more = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 6a6dfc062d2a..a1c4f0a55583 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, long copied = 0; lock_sock(sk); + msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; diff --git a/crypto/gcm.c b/crypto/gcm.c index 137ad1ec5438..13ccbda34ff9 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx { struct crypto_rfc4543_req_ctx { u8 auth_tag[16]; + u8 assocbuf[32]; struct scatterlist cipher[1]; struct scatterlist payload[2]; struct scatterlist assoc[2]; @@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 
0 : authsize); - sg_init_table(assoc, 2); - sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, - req->assoc->offset); + if (req->assoc->length == req->assoclen) { + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); + } else { + BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); + + scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, + req->assoclen, 0); + + sg_init_table(assoc, 2); + sg_set_buf(assoc, rctx->assocbuf, req->assoclen); + } scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 92ed9692c47e..4bf68c8d4797 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD config ACPI_BGRT bool "Boottime Graphics Resource Table support" - depends on EFI + depends on EFI && X86 help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c index 82045e3f5cac..a82c7626aa9b 100644 --- a/drivers/acpi/acpi_i2c.c +++ b/drivers/acpi/acpi_i2c.c @@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter) acpi_handle handle; acpi_status status; - handle = ACPI_HANDLE(&adapter->dev); + handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return; diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index 1e5d8a40101e..fefc2ca7cc3e 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c @@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus) return rc; data_len = estatus->data_length; gdata = (struct acpi_hest_generic_data *)(estatus + 1); - while (data_len > sizeof(*gdata)) { + while (data_len >= sizeof(*gdata)) { gedata_len = gdata->error_data_length; if (gedata_len > data_len - sizeof(*gdata)) return -EINVAL; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index b80e06e0b2d9..ac8688b89705 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -382,7 +382,6 @@ static int acpi_pci_root_add(struct acpi_device *device, int result; struct acpi_pci_root *root; u32 flags, base_flags; - bool is_osc_granted = false; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -443,6 +442,30 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; acpi_pci_osc_support(root, flags); + /* + * TBD: Need PCI interface for enumeration/configuration of roots. + */ + + mutex_lock(&acpi_pci_root_lock); + list_add_tail(&root->node, &acpi_pci_roots); + mutex_unlock(&acpi_pci_root_lock); + + /* + * Scan the Root Bridge + * -------------------- + * Must do this prior to any attempt to bind the root device, as the + * PCI namespace does not get created until this call is made (and + * thus the root bridge's pci_dev does not exist). + */ + root->bus = pci_acpi_scan_root(root); + if (!root->bus) { + printk(KERN_ERR PREFIX + "Bus %04x:%02x not present in PCI namespace\n", + root->segment, (unsigned int)root->secondary.start); + result = -ENODEV; + goto out_del_root; + } + /* Indicate support for various _OSC capabilities. 
*/ if (pci_ext_cfg_avail()) flags |= OSC_EXT_PCI_CONFIG_SUPPORT; @@ -461,6 +484,7 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags; } } + if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL @@ -481,54 +505,28 @@ static int acpi_pci_root_add(struct acpi_device *device, status = acpi_pci_osc_control_set(device->handle, &flags, OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); if (ACPI_SUCCESS(status)) { - is_osc_granted = true; dev_info(&device->dev, "ACPI _OSC control (0x%02x) granted\n", flags); + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { + /* + * We have ASPM control, but the FADT indicates + * that it's unsupported. Clear it. + */ + pcie_clear_aspm(root->bus); + } } else { - is_osc_granted = false; dev_info(&device->dev, "ACPI _OSC request failed (%s), " "returned control mask: 0x%02x\n", acpi_format_exception(status), flags); + pr_info("ACPI _OSC control for PCIe not granted, " + "disabling ASPM\n"); + pcie_no_aspm(); } } else { dev_info(&device->dev, - "Unable to request _OSC control " - "(_OSC support mask: 0x%02x)\n", flags); - } - - /* - * TBD: Need PCI interface for enumeration/configuration of roots. - */ - - mutex_lock(&acpi_pci_root_lock); - list_add_tail(&root->node, &acpi_pci_roots); - mutex_unlock(&acpi_pci_root_lock); - - /* - * Scan the Root Bridge - * -------------------- - * Must do this prior to any attempt to bind the root device, as the - * PCI namespace does not get created until this call is made (and - * thus the root bridge's pci_dev does not exist). - */ - root->bus = pci_acpi_scan_root(root); - if (!root->bus) { - printk(KERN_ERR PREFIX - "Bus %04x:%02x not present in PCI namespace\n", - root->segment, (unsigned int)root->secondary.start); - result = -ENODEV; - goto out_del_root; - } - - /* ASPM setting */ - if (is_osc_granted) { - if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) - pcie_clear_aspm(root->bus); - } else { - pr_info("ACPI _OSC control for PCIe not granted, " - "disabling ASPM\n"); - pcie_no_aspm(); + "Unable to request _OSC control " + "(_OSC support mask: 0x%02x)\n", flags); } pci_acpi_add_bus_pm_notifier(device, root->bus); @@ -600,6 +598,7 @@ static void handle_root_bridge_insertion(acpi_handle handle) static void handle_root_bridge_removal(struct acpi_device *device) { + acpi_status status; struct acpi_eject_event *ej_event; ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); @@ -615,7 +614,9 @@ static void handle_root_bridge_removal(struct acpi_device *device) ej_event->device = device; ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; - acpi_bus_hot_remove_device(ej_event); + status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); + if (ACPI_FAILURE(status)) + kfree(ej_event); } static void _handle_hotplug_event_root(struct work_struct *work) @@ -630,8 +631,9 @@ static void _handle_hotplug_event_root(struct work_struct *work) handle = hp_work->handle; type = hp_work->type; - root = acpi_pci_find_root(handle); + acpi_scan_lock_acquire(); + root = acpi_pci_find_root(handle); acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); switch (type) { @@ -665,6 +667,7 @@ static void _handle_hotplug_event_root(struct work_struct *work) break; } + acpi_scan_lock_release(); kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ kfree(buffer.pointer); } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc95308e9a11..ee255c60bdac 100644 --- a/drivers/acpi/processor_idle.c +++ 
b/drivers/acpi/processor_idle.c @@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644); static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); -static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; +static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], + acpi_cstate); static int disabled_by_idle_boot_param(void) { @@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, */ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) { - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); ACPI_FLUSH_CPU_CACHE(); @@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = acpi_cstate[index]; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); pr = __this_cpu_read(processors); @@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif - acpi_cstate[count] = cx; + per_cpu(acpi_cstate[count], dev->cpu) = cx; count++; if (count == CPUIDLE_STATE_MAX) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 53e7ac9403a7..e854582f29a6 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -465,7 +465,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) return result; } -static int acpi_processor_get_performance_info(struct acpi_processor *pr) +int acpi_processor_get_performance_info(struct acpi_processor *pr) { int result = 0; acpi_status status = AE_OK; @@ -509,7 +509,7 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr) #endif return result; } - +EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info); int acpi_processor_notify_smm(struct module *calling_module) { acpi_status status; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 24213033fbae..9c1a435d10e6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, { .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW21M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"), + }, + }, + { + .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB17FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 093c43554963..1f44e56cc65d 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn) EXPORT_SYMBOL(tegra_ahb_enable_smmu); #endif -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM static int tegra_ahb_suspend(struct device *dev) { int i; diff 
--git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 3e751b74615e..a5a3ebcbdd2c 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -59,15 +59,16 @@ config ATA_ACPI option libata.noacpi=1 config SATA_ZPODD - bool "SATA Zero Power ODD Support" + bool "SATA Zero Power Optical Disc Drive (ZPODD) support" depends on ATA_ACPI default n help - This option adds support for SATA ZPODD. It requires both - ODD and the platform support, and if enabled, will automatically - power on/off the ODD when certain condition is satisfied. This - does not impact user's experience of the ODD, only power is saved - when ODD is not in use(i.e. no disc inside). + This option adds support for SATA Zero Power Optical Disc + Drive (ZPODD). It requires both the ODD and the platform + support, and if enabled, will automatically power on/off the + ODD when certain condition is satisfied. This does not impact + end user's experience of the ODD, only power is saved when + the ODD is not in use (i.e. no disc inside). If unsure, say N. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 616952facd6e..251e57d38942 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d2ba439cfe54..2f48123d74c4 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -150,6 +150,7 @@ enum piix_controller_ids { tolapai_sata, piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, + ich8_2port_sata_snb, }; struct piix_map_db { @@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = { /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ - { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ @@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = { [ich8m_apple_sata] = &ich8m_apple_map_db, [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, + [ich8_2port_sata_snb] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, + + [ich8_2port_sata_snb] = + { + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR + | PIIX_FLAG_PIO16, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &piix_sata_ops, + }, }; #define AHCI_PCI_BAR 5 @@ -1547,6 +1559,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev) static int prefer_ms_hyperv = 1; module_param(prefer_ms_hyperv, int, 0); +MODULE_PARM_DESC(prefer_ms_hyperv, + "Prefer Hyper-V paravirtualization drivers instead of ATA, " + "0 - Use ATA drivers, " + "1 (Default) - Use 
the paravirtualization drivers."); static void piix_ignore_devices_quirk(struct ata_host *host) { diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index beea3115577e..8a52dab412e2 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev) handle = ata_dev_acpi_handle(dev); if (handle) - acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev); + acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev); } static void ata_acpi_unregister_power_resource(struct ata_device *dev) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 497adea1f0d6..63c743baf920 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) * from SATA Settings page of Identify Device Data Log. */ if (ata_id_has_devslp(dev->id)) { - u8 sata_setting[ATA_SECT_SIZE]; + u8 *sata_setting = ap->sector_buf; int i, j; dev->flags |= ATA_DFLAG_DEVSLP; @@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, + { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 318b41358187..ff44787e5a45 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } @@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 70b0e01372b3..6ef27e98c508 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = { }, }; -static int __init pata_s3c_init(void) -{ - return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe); -} - -static void __exit pata_s3c_exit(void) -{ - platform_driver_unregister(&pata_s3c_driver); -} - -module_init(pata_s3c_init); -module_exit(pata_s3c_exit); +module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe); MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 124b2c1d9c0b..608f82fed632 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1511,8 +1511,7 @@ error_exit_with_cleanup: if (hcr_base) 
iounmap(hcr_base); - if (host_priv) - kfree(host_priv); + kfree(host_priv); return retval; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef258..71671c42ef45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -46,6 +46,7 @@ #include "power.h" static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct pm_qos_constraints *c; struct pm_qos_flags *f; - mutex_lock(&dev_pm_qos_mtx); + mutex_lock(&dev_pm_qos_sysfs_mtx); /* * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ + pm_qos_sysfs_remove_latency(dev); + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); __dev_pm_qos_hide_flags(dev); @@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) out: mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } /** @@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, kfree(req); } +static void dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_drop_user_request(dev, type); + mutex_unlock(&dev_pm_qos_mtx); +} + /** * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. * @dev: Device whose PM QoS latency limit is to be exposed to user space. @@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) return ret; } + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->latency_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_latency(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); static void __dev_pm_qos_hide_latency_limit(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { - pm_qos_sysfs_remove_latency(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); - } } /** @@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) */ void dev_pm_qos_hide_latency_limit(struct device *dev) { + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_latency(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_latency_limit(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) } pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + mutex_lock(&dev_pm_qos_mtx); if (IS_ERR_OR_NULL(dev->power.qos)) @@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) if (ret < 0) { __dev_pm_qos_remove_request(req); kfree(req); + mutex_unlock(&dev_pm_qos_mtx); goto out; } - dev->power.qos->flags_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = 
pm_qos_sysfs_add_flags(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); out: - mutex_unlock(&dev_pm_qos_mtx); + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); return ret; } @@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); static void __dev_pm_qos_hide_flags(struct device *dev) { - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { - pm_qos_sysfs_remove_flags(dev); + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); - } } /** @@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) void dev_pm_qos_hide_flags(struct device *dev) { pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_flags(dev); + mutex_lock(&dev_pm_qos_mtx); __dev_pm_qos_hide_flags(dev); mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06e..79f4fca9877a 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, base = 0; if (max < rbnode->base_reg + rbnode->blklen) - end = rbnode->base_reg + rbnode->blklen - max; + end = max - rbnode->base_reg + 1; else end = rbnode->blklen; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d2367501fd0..58cfb3232428 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -710,12 +710,12 @@ skip_format_initialization: } } + regmap_debugfs_init(map, config->name); + ret = regcache_init(map, config); if (ret != 0) goto err_range; - regmap_debugfs_init(map, config->name); - /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); if (!m) { @@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, kfree(async->work_buf); kfree(async); } + + return ret; } trace_regmap_hw_write_start(map->dev, reg, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 5dc0daed8fac..b81ddfea1da0 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -532,11 +532,11 @@ config BLK_DEV_RBD If unsure, say N. config BLK_DEV_RSXX - tristate "RamSam PCIe Flash SSD Device Driver" + tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver" depends on PCI help Device driver for IBM's high speed PCIe SSD - storage devices: RamSan-70 and RamSan-80. + storage devices: FlashSystem-70 and FlashSystem-80. To compile this driver as a module, choose M here: the module will be called rsxx. 
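
[Editor's note] The regcache-rbtree hunk above clamps the sync window to the caller's [min, max] range; the old expression computed a block length rather than an end index. A minimal userspace sketch of that index arithmetic, assuming X: sync_block() and its start computation are illustrative stand-ins, not the regmap API, and only the end-index formula is taken from the patch.

	#include <stdio.h>

	/*
	 * A cache block starts at register base_reg and holds blklen
	 * registers.  Sync only the registers that fall inside the
	 * requested range [min, max]; end is exclusive, as in the patch.
	 */
	static void sync_block(unsigned int base_reg, unsigned int blklen,
			       unsigned int min, unsigned int max)
	{
		unsigned int start, end;	/* indices within the block */

		start = (min > base_reg) ? min - base_reg : 0;

		if (max < base_reg + blklen)
			end = max - base_reg + 1;	/* clamp to the request */
		else
			end = blklen;			/* whole block fits */

		printf("sync registers 0x%x..0x%x of block at 0x%x\n",
		       base_reg + start, base_reg + end - 1, base_reg);
	}

	int main(void)
	{
		/* Block caches 0x10..0x1f; caller asks to sync 0x00..0x14. */
		sync_block(0x10, 0x10, 0x00, 0x14);	/* -> 0x10..0x14 */
		return 0;
	}

With this example the fixed formula yields an end index of 5 (registers 0x10..0x14), whereas the removed expression would have produced 0x0c and synced registers well past the requested maximum.
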
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 25ef5c014fca..92b6d7c51e39 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -51,8 +51,9 @@ new_skb(ulong len) { struct sk_buff *skb; - skb = alloc_skb(len, GFP_ATOMIC); + skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); if (skb) { + skb_reserve(skb, MAX_HEADER); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index ade58bc8f3c4..1c1b8e544aa2 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h) if (rc) return rc; h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, - cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); + cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); if (!h->cfgtable) return -ENOMEM; rc = write_driver_ver_to_cfgtable(h->cfgtable); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 747bb2af69dc..dfe758382eaf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_flags |= LO_FLAGS_PARTSCAN; if (lo->lo_flags & LO_FLAGS_PARTSCAN) ioctl_by_bdev(bdev, BLKRRPART, 0); + + /* Grab the block_device to prevent its destruction after we + * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). + */ + bdgrab(bdev); return 0; out_clr: @@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - if (bdev) + if (bdev) { + bdput(bdev); invalidate_bdev(bdev); + } set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { @@ -1623,6 +1630,7 @@ static int loop_add(struct loop_device **l, int i) goto out_free_dev; i = err; + err = -ENOMEM; lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) goto out_free_dev; diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 1788f491e0fb..076ae7f1b781 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev) gpio_direction_output(host->rst, 1); /* reset out pin */ - if (!(prv_data->dev_attr & MG_DEV_MASK)) + if (!(prv_data->dev_attr & MG_DEV_MASK)) { + err = -EINVAL; goto probe_err_3a; + } if (prv_data->dev_attr != MG_BOOT_DEV) { rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 11cc9522cdd4..32c678028e53 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -81,12 +81,17 @@ /* Device instance number, incremented each time a device is probed. */ static int instance; +struct list_head online_list; +struct list_head removing_list; +spinlock_t dev_lock; + /* * Global variable used to hold the major block device number * allocated in mtip_init(). */ static int mtip_major; static struct dentry *dfs_parent; +static struct dentry *dfs_device_status; static u32 cpu_use[NR_CPUS]; @@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag) /* * Reset the HBA (without sleeping) * - * Just like hba_reset, except does not call sleep, so can be - * run from interrupt/tasklet context. - * * @dd Pointer to the driver data structure. * * return value * 0 The reset was successful. * -1 The HBA Reset bit did not clear. 
*/ -static int hba_reset_nosleep(struct driver_data *dd) +static int mtip_hba_reset(struct driver_data *dd) { unsigned long timeout; - /* Chip quirk: quiesce any chip function */ - mdelay(10); - /* Set the reset bit */ writel(HOST_RESET, dd->mmio + HOST_CTL); /* Flush */ readl(dd->mmio + HOST_CTL); - /* - * Wait 10ms then spin for up to 1 second - * waiting for reset acknowledgement - */ - timeout = jiffies + msecs_to_jiffies(1000); - mdelay(10); - while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) - && time_before(jiffies, timeout)) - mdelay(1); + /* Spin for up to 2 seconds, waiting for reset acknowledgement */ + timeout = jiffies + msecs_to_jiffies(2000); + do { + mdelay(10); + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) + return -1; - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) - return -1; + } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) + && time_before(jiffies, timeout)); if (readl(dd->mmio + HOST_CTL) & HOST_RESET) return -1; @@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port) dev_warn(&port->dd->pdev->dev, "PxCMD.CR not clear, escalating reset\n"); - if (hba_reset_nosleep(port->dd)) + if (mtip_hba_reset(port->dd)) dev_err(&port->dd->pdev->dev, "HBA reset escalation failed.\n"); @@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port) } +static int mtip_device_reset(struct driver_data *dd) +{ + int rv = 0; + + if (mtip_check_surprise_removal(dd->pdev)) + return 0; + + if (mtip_hba_reset(dd) < 0) + rv = -EFAULT; + + mdelay(1); + mtip_init_port(dd->port); + mtip_start_port(dd->port); + + /* Enable interrupts on the HBA. */ + writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, + dd->mmio + HOST_CTL); + return rv; +} + /* * Helper function for tag logging */ @@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data) if (cmdto_cnt) { print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - mtip_restart_port(port); + mtip_device_reset(port->dd); wake_up_interruptible(&port->svc_wait); } clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); @@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, int rv = 0, ready2go = 1; struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; unsigned long to; + struct driver_data *dd = port->dd; /* Make sure the buffer is 8 byte aligned. This is asic specific. */ if (buffer & 0x00000007) { - dev_err(&port->dd->pdev->dev, - "SG buffer is not 8 byte aligned\n"); + dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); return -EFAULT; } @@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port, mdelay(100); } while (time_before(jiffies, to)); if (!ready2go) { - dev_warn(&port->dd->pdev->dev, + dev_warn(&dd->pdev->dev, "Internal cmd active. 
new cmd [%02X]\n", fis->command); return -EBUSY; } set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); port->ic_pause_timer = 0; - if (fis->command == ATA_CMD_SEC_ERASE_UNIT) - clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); - else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) - clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); if (atomic == GFP_KERNEL) { if (fis->command != ATA_CMD_STANDBYNOW1) { /* wait for io to complete if non atomic */ if (mtip_quiesce_io(port, 5000) < 0) { - dev_warn(&port->dd->pdev->dev, + dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n"); release_slot(port, MTIP_TAG_INTERNAL); clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); @@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port, /* Issue the command to the hardware */ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); - /* Poll if atomic, wait_for_completion otherwise */ if (atomic == GFP_KERNEL) { /* Wait for the command to complete or timeout. */ - if (wait_for_completion_timeout( + if (wait_for_completion_interruptible_timeout( &wait, - msecs_to_jiffies(timeout)) == 0) { - dev_err(&port->dd->pdev->dev, - "Internal command did not complete [%d] " - "within timeout of %lu ms\n", - atomic, timeout); - if (mtip_check_surprise_removal(port->dd->pdev) || + msecs_to_jiffies(timeout)) <= 0) { + if (rv == -ERESTARTSYS) { /* interrupted */ + dev_err(&dd->pdev->dev, + "Internal command [%02X] was interrupted after %lu ms\n", + fis->command, timeout); + rv = -EINTR; + goto exec_ic_exit; + } else if (rv == 0) /* timeout */ + dev_err(&dd->pdev->dev, + "Internal command did not complete [%02X] within timeout of %lu ms\n", + fis->command, timeout); + else + dev_err(&dd->pdev->dev, + "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", + fis->command, rv, timeout); + + if (mtip_check_surprise_removal(dd->pdev) || test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { + &dd->dd_flag)) { + dev_err(&dd->pdev->dev, + "Internal command [%02X] wait returned due to SR\n", + fis->command); rv = -ENXIO; goto exec_ic_exit; } + mtip_device_reset(dd); /* recover from timeout issue */ rv = -EAGAIN; + goto exec_ic_exit; } } else { + u32 hba_stat, port_stat; + /* Spin for <timeout> checking if command still outstanding */ timeout = jiffies + msecs_to_jiffies(timeout); while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL)) && time_before(jiffies, timeout)) { - if (mtip_check_surprise_removal(port->dd->pdev)) { + if (mtip_check_surprise_removal(dd->pdev)) { rv = -ENXIO; goto exec_ic_exit; } if ((fis->command != ATA_CMD_STANDBYNOW1) && test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { + &dd->dd_flag)) { rv = -ENXIO; goto exec_ic_exit; } - if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { - atomic_inc(&int_cmd->active); /* error */ - break; + port_stat = readl(port->mmio + PORT_IRQ_STAT); + if (!port_stat) + continue; + + if (port_stat & PORT_IRQ_ERR) { + dev_err(&dd->pdev->dev, + "Internal command [%02X] failed\n", + fis->command); + mtip_device_reset(dd); + rv = -EIO; + goto exec_ic_exit; + } else { + writel(port_stat, port->mmio + PORT_IRQ_STAT); + hba_stat = readl(dd->mmio + HOST_IRQ_STAT); + if (hba_stat) + writel(hba_stat, + dd->mmio + HOST_IRQ_STAT); } + break; } } - if (atomic_read(&int_cmd->active) > 1) { - dev_err(&port->dd->pdev->dev, - "Internal command [%02X] failed\n", fis->command); - rv = -EIO; - } if 
(readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL)) { rv = -ENXIO; - if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &port->dd->dd_flag)) { - mtip_restart_port(port); + if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { + mtip_device_reset(dd); rv = -EAGAIN; } } @@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, * -EINVAL Invalid parameters passed in, trim not supported * -EIO Error submitting trim request to hw */ -static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) +static int mtip_send_trim(struct driver_data *dd, unsigned int lba, + unsigned int len) { int i, rv = 0; u64 tlba, tlen, sect_left; @@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) } /* - * Reset the HBA. - * - * Resets the HBA by setting the HBA Reset bit in the Global - * HBA Control register. After setting the HBA Reset bit the - * function waits for 1 second before reading the HBA Reset - * bit to make sure it has cleared. If HBA Reset is not clear - * an error is returned. Cannot be used in non-blockable - * context. - * - * @dd Pointer to the driver data structure. - * - * return value - * 0 The reset was successful. - * -1 The HBA Reset bit did not clear. - */ -static int mtip_hba_reset(struct driver_data *dd) -{ - mtip_deinit_port(dd->port); - - /* Set the reset bit */ - writel(HOST_RESET, dd->mmio + HOST_CTL); - - /* Flush */ - readl(dd->mmio + HOST_CTL); - - /* Wait for reset to clear */ - ssleep(1); - - /* Check the bit has cleared */ - if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { - dev_err(&dd->pdev->dev, - "Reset bit did not clear.\n"); - return -1; - } - - return 0; -} - -/* * Display the identify command data. * * @port Pointer to the port data structure. 
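
[Editor's note] The mtip_hba_reset()/mtip_device_reset() rework above replaces the old sleep-free reset with a bounded poll: write HOST_RESET, flush the posted write with a read-back, then spin for up to two seconds waiting for the controller to clear the bit. A stand-alone sketch of that poll-until-clear pattern, assuming X: all names below are illustrative, and a plain variable with a canned acknowledgement stands in for the memory-mapped HOST_CTL register.

	#include <stdio.h>

	#define HOST_RESET (1u << 0)

	static unsigned int fake_host_ctl;

	/* Pretend the controller acknowledges after a few polls. */
	static unsigned int read_host_ctl(void)
	{
		static int polls;

		if (++polls >= 3)
			fake_host_ctl &= ~HOST_RESET;
		return fake_host_ctl;
	}

	static int reset_and_wait(unsigned int timeout_ms)
	{
		unsigned int waited = 0;

		fake_host_ctl |= HOST_RESET;	/* set the reset bit */
		(void)read_host_ctl();		/* "flush" the posted write */

		do {
			waited += 10;		/* stands in for mdelay(10) */
			if (!(read_host_ctl() & HOST_RESET))
				return 0;	/* hardware acknowledged */
		} while (waited < timeout_ms);

		return -1;			/* reset bit did not clear */
	}

	int main(void)
	{
		printf("reset %s\n",
		       reset_and_wait(2000) ? "timed out" : "complete");
		return 0;
	}

The check of MTIP_DDF_REMOVE_PENDING_BIT inside the real loop (omitted here for brevity) is what lets the driver bail out early on surprise removal instead of burning the full timeout.
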
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev, static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); +/* debugsfs entries */ + +static ssize_t show_device_status(struct device_driver *drv, char *buf) +{ + int size = 0; + struct driver_data *dd, *tmp; + unsigned long flags; + char id_buf[42]; + u16 status = 0; + + spin_lock_irqsave(&dev_lock, flags); + size += sprintf(&buf[size], "Devices Present:\n"); + list_for_each_entry_safe(dd, tmp, &online_list, online_list) { + if (dd->pdev) { + if (dd->port && + dd->port->identify && + dd->port->identify_valid) { + strlcpy(id_buf, + (char *) (dd->port->identify + 10), 21); + status = *(dd->port->identify + 141); + } else { + memset(id_buf, 0, 42); + status = 0; + } + + if (dd->port && + test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { + size += sprintf(&buf[size], + " device %s %s (ftl rebuild %d %%)\n", + dev_name(&dd->pdev->dev), + id_buf, + status); + } else { + size += sprintf(&buf[size], + " device %s %s\n", + dev_name(&dd->pdev->dev), + id_buf); + } + } + } + + size += sprintf(&buf[size], "Devices Being Removed:\n"); + list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { + if (dd->pdev) { + if (dd->port && + dd->port->identify && + dd->port->identify_valid) { + strlcpy(id_buf, + (char *) (dd->port->identify+10), 21); + status = *(dd->port->identify + 141); + } else { + memset(id_buf, 0, 42); + status = 0; + } + + if (dd->port && + test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { + size += sprintf(&buf[size], + " device %s %s (ftl rebuild %d %%)\n", + dev_name(&dd->pdev->dev), + id_buf, + status); + } else { + size += sprintf(&buf[size], + " device %s %s\n", + dev_name(&dd->pdev->dev), + id_buf); + } + } + } + spin_unlock_irqrestore(&dev_lock, flags); + + return size; +} + +static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, + size_t len, loff_t *offset) +{ + int size = *offset; + char buf[MTIP_DFS_MAX_BUF_SIZE]; + + if (!len || *offset) + return 0; + + size += show_device_status(NULL, buf); + + *offset = size <= len ? size : len; + size = copy_to_user(ubuf, buf, *offset); + if (size) + return -EFAULT; + + return *offset; +} + static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { @@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, return *offset; } +static const struct file_operations mtip_device_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mtip_hw_read_device_status, + .llseek = no_llseek, +}; + static const struct file_operations mtip_regs_fops = { .owner = THIS_MODULE, .open = simple_open, @@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, const struct cpumask *node_mask; int cpu, i = 0, j = 0; int my_node = NUMA_NO_NODE; + unsigned long flags; /* Allocate memory for this devices private data. 
*/ my_node = pcibus_to_node(pdev->bus); @@ -4218,12 +4322,16 @@ static int mtip_pci_probe(struct pci_dev *pdev, dd->pdev = pdev; dd->numa_node = my_node; + INIT_LIST_HEAD(&dd->online_list); + INIT_LIST_HEAD(&dd->remove_list); + memset(dd->workq_name, 0, 32); snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); dd->isr_workq = create_workqueue(dd->workq_name); if (!dd->isr_workq) { dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); + rv = -ENOMEM; goto block_initialize_err; } @@ -4282,7 +4390,8 @@ static int mtip_pci_probe(struct pci_dev *pdev, INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); pci_set_master(pdev); - if (pci_enable_msi(pdev)) { + rv = pci_enable_msi(pdev); + if (rv) { dev_warn(&pdev->dev, "Unable to enable MSI interrupt.\n"); goto block_initialize_err; @@ -4303,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, instance++; if (rv != MTIP_FTL_REBUILD_MAGIC) set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); + else + rv = 0; /* device in rebuild state, return 0 from probe */ + + /* Add to online list even if in ftl rebuild */ + spin_lock_irqsave(&dev_lock, flags); + list_add(&dd->online_list, &online_list); + spin_unlock_irqrestore(&dev_lock, flags); + goto done; block_initialize_err: @@ -4336,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); int counter = 0; + unsigned long flags; set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); + spin_lock_irqsave(&dev_lock, flags); + list_del_init(&dd->online_list); + list_add(&dd->remove_list, &removing_list); + spin_unlock_irqrestore(&dev_lock, flags); + if (mtip_check_surprise_removal(pdev)) { while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { counter++; @@ -4364,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) pci_disable_msi(pdev); + spin_lock_irqsave(&dev_lock, flags); + list_del_init(&dd->remove_list); + spin_unlock_irqrestore(&dev_lock, flags); + kfree(dd); pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); } @@ -4511,6 +4638,11 @@ static int __init mtip_init(void) pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); + spin_lock_init(&dev_lock); + + INIT_LIST_HEAD(&online_list); + INIT_LIST_HEAD(&removing_list); + /* Allocate a major block device number to use with this driver. 
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { @@ -4520,11 +4652,18 @@ static int __init mtip_init(void) } mtip_major = error; - if (!dfs_parent) { - dfs_parent = debugfs_create_dir("rssd", NULL); - if (IS_ERR_OR_NULL(dfs_parent)) { - pr_warn("Error creating debugfs parent\n"); - dfs_parent = NULL; + dfs_parent = debugfs_create_dir("rssd", NULL); + if (IS_ERR_OR_NULL(dfs_parent)) { + pr_warn("Error creating debugfs parent\n"); + dfs_parent = NULL; + } + if (dfs_parent) { + dfs_device_status = debugfs_create_file("device_status", + S_IRUGO, dfs_parent, NULL, + &mtip_device_status_fops); + if (IS_ERR_OR_NULL(dfs_device_status)) { + pr_err("Error creating device_status node\n"); + dfs_device_status = NULL; } } diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bffff5f670c..8e8334c9dd0f 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -129,9 +129,9 @@ enum { MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ - MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ - (1 << MTIP_PF_EH_ACTIVE_BIT) | \ - (1 << MTIP_PF_SE_ACTIVE_BIT) | \ + MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | + (1 << MTIP_PF_EH_ACTIVE_BIT) | + (1 << MTIP_PF_SE_ACTIVE_BIT) | (1 << MTIP_PF_DM_ACTIVE_BIT)), MTIP_PF_SVC_THD_ACTIVE_BIT = 4, @@ -144,9 +144,9 @@ enum { MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, - MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ - (1 << MTIP_DDF_SEC_LOCK_BIT) | \ - (1 << MTIP_DDF_OVER_TEMP_BIT) | \ + MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | + (1 << MTIP_DDF_SEC_LOCK_BIT) | + (1 << MTIP_DDF_OVER_TEMP_BIT) | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), MTIP_DDF_CLEANUP_BIT = 5, @@ -180,7 +180,7 @@ struct mtip_work { #define MTIP_TRIM_TIMEOUT_MS 240000 #define MTIP_MAX_TRIM_ENTRIES 8 -#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 +#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 struct mtip_trim_entry { u32 lba; /* starting lba of region */ @@ -501,6 +501,10 @@ struct driver_data { atomic_t irq_workers_active; int isr_binding; + + struct list_head online_list; /* linkage for online list */ + + struct list_head remove_list; /* linkage for removing list */ }; #endif diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 07fb2dfaae13..9dcefe40380b 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c @@ -135,6 +135,7 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); + BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); } typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, @@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, *fn = special_completion; return CMD_CTX_INVALID; } - *fn = info[cmdid].fn; + if (fn) + *fn = info[cmdid].fn; ctx = info[cmdid].ctx; info[cmdid].fn = special_completion; info[cmdid].ctx = CMD_CTX_COMPLETED; @@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) iod->offset = offsetof(struct nvme_iod, sg[nseg]); iod->npages = -1; iod->length = nbytes; + iod->nents = 0; } return iod; @@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx, struct bio *bio = iod->private; u16 status = le16_to_cpup(&cqe->status) >> 1; - dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + if (iod->nents) + 
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); nvme_free_iod(dev, iod); if (status) { @@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); if (result < 0) - goto free_iod; + goto free_cmdid; length = result; cmnd->rw.command_id = cmdid; @@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, return 0; + free_cmdid: + free_cmdid(nvmeq, cmdid, NULL); free_iod: nvme_free_iod(nvmeq->dev, iod); nomem: @@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, return nvme_submit_admin_cmd(dev, &c, NULL); } -static int nvme_get_features(struct nvme_dev *dev, unsigned fid, - unsigned nsid, dma_addr_t dma_addr) +static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, + dma_addr_t dma_addr, u32 *result) { struct nvme_command c; @@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, c.features.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - return nvme_submit_admin_cmd(dev, &c, NULL); + return nvme_submit_admin_cmd(dev, &c, result); } static int nvme_set_features(struct nvme_dev *dev, unsigned fid, @@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) spin_lock_irq(&nvmeq->q_lock); nvme_cancel_ios(nvmeq, false); + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + bio_endio(bio, -EIO); + } spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, if (length != cmd.data_len) status = -ENOMEM; else - status = nvme_submit_admin_cmd(dev, &c, NULL); + status = nvme_submit_admin_cmd(dev, &c, &cmd.result); if (cmd.data_len) { nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); nvme_free_iod(dev, iod); } + + if (!status && copy_to_user(&ucmd->result, &cmd.result, + sizeof(cmd.result))) + status = -EFAULT; + return status; } @@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev) continue; res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, - dma_addr + 4096); + dma_addr + 4096, NULL); if (res) - continue; + memset(mem + 4096, 0, 4096); ns = nvme_alloc_ns(dev, i, mem, mem + 4096); if (ns) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6c81a4c040b9..b7b7a88d9f68 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request) return atomic_read(&obj_request->done) != 0; } +static void +rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) +{ + dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, + obj_request, obj_request->img_request, obj_request->result, + obj_request->xferred, obj_request->length); + /* + * ENOENT means a hole in the image. We zero-fill the + * entire length of the request. A short read also implies + * zero-fill to the end of the request. Either way we + * update the xferred count to indicate the whole request + * was satisfied. 
+ */ + BUG_ON(obj_request->type != OBJ_REQUEST_BIO); + if (obj_request->result == -ENOENT) { + zero_bio_chain(obj_request->bio_list, 0); + obj_request->result = 0; + obj_request->xferred = obj_request->length; + } else if (obj_request->xferred < obj_request->length && + !obj_request->result) { + zero_bio_chain(obj_request->bio_list, obj_request->xferred); + obj_request->xferred = obj_request->length; + } + obj_request_done_set(obj_request); +} + static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) { dout("%s: obj %p cb %p\n", __func__, obj_request, @@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) { dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, obj_request->result, obj_request->xferred, obj_request->length); - /* - * ENOENT means a hole in the object. We zero-fill the - * entire length of the request. A short read also implies - * zero-fill to the end of the request. Either way we - * update the xferred count to indicate the whole request - * was satisfied. - */ - if (obj_request->result == -ENOENT) { - zero_bio_chain(obj_request->bio_list, 0); - obj_request->result = 0; - obj_request->xferred = obj_request->length; - } else if (obj_request->xferred < obj_request->length && - !obj_request->result) { - zero_bio_chain(obj_request->bio_list, obj_request->xferred); - obj_request->xferred = obj_request->length; - } - obj_request_done_set(obj_request); + if (obj_request->img_request) + rbd_img_obj_request_read_callback(obj_request); + else + obj_request_done_set(obj_request); } static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) @@ -1729,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) struct rbd_device *rbd_dev = img_request->rbd_dev; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; + struct rbd_obj_request *next_obj_request; dout("%s: img %p\n", __func__, img_request); - for_each_obj_request(img_request, obj_request) { + for_each_obj_request_safe(img_request, obj_request, next_obj_request) { int ret; obj_request->callback = rbd_img_obj_callback; diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile index f35cd0b71f7b..b1c53c0aa450 100644 --- a/drivers/block/rsxx/Makefile +++ b/drivers/block/rsxx/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o -rsxx-y := config.o core.o cregs.o dev.o dma.o +rsxx-objs := config.o core.o cregs.o dev.o dma.o diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c index a295e7e9ee41..10cd530d3e10 100644 --- a/drivers/block/rsxx/config.c +++ b/drivers/block/rsxx/config.c @@ -29,15 +29,13 @@ #include "rsxx_priv.h" #include "rsxx_cfg.h" -static void initialize_config(void *config) +static void initialize_config(struct rsxx_card_cfg *cfg) { - struct rsxx_card_cfg *cfg = config; - cfg->hdr.version = RSXX_CFG_VERSION; cfg->data.block_size = RSXX_HW_BLK_SIZE; cfg->data.stripe_size = RSXX_HW_BLK_SIZE; - cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM; + cfg->data.vendor_id = RSXX_VENDOR_ID_IBM; cfg->data.cache_order = (-1); cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; cfg->data.intr_coal.count = 0; @@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card) } else { dev_info(CARD_TO_DEV(card), "Initializing card configuration.\n"); - initialize_config(card); + initialize_config(&card->config); st = rsxx_save_config(card); if (st) return st; diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 
e5162487686a..5af21f2db29c 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -30,6 +30,7 @@ #include <linux/reboot.h> #include <linux/slab.h> #include <linux/bitops.h> +#include <linux/delay.h> #include <linux/genhd.h> #include <linux/idr.h> @@ -39,8 +40,8 @@ #define NO_LEGACY 0 -MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); -MODULE_AUTHOR("IBM <support@ramsan.com>"); +MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver"); +MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); @@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida); static DEFINE_SPINLOCK(rsxx_ida_lock); /*----------------- Interrupt Control & Handling -------------------*/ + +static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) +{ + card->isr_mask = 0; + card->ier_mask = 0; +} + static void __enable_intr(unsigned int *mask, unsigned int intr) { *mask |= intr; @@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr) */ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) { - if (unlikely(card->halt)) + if (unlikely(card->halt) || + unlikely(card->eeh_state)) return; __enable_intr(&card->ier_mask, intr); @@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) { + if (unlikely(card->eeh_state)) + return; + __disable_intr(&card->ier_mask, intr); iowrite32(card->ier_mask, card->regmap + IER); } @@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, unsigned int intr) { - if (unlikely(card->halt)) + if (unlikely(card->halt) || + unlikely(card->eeh_state)) return; __enable_intr(&card->isr_mask, intr); @@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, unsigned int intr) { + if (unlikely(card->eeh_state)) + return; + __disable_intr(&card->isr_mask, intr); __disable_intr(&card->ier_mask, intr); iowrite32(card->ier_mask, card->regmap + IER); @@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) do { reread_isr = 0; + if (unlikely(card->eeh_state)) + break; + isr = ioread32(card->regmap + ISR); if (isr == 0xffffffff) { /* @@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) } /*----------------- Card Event Handler -------------------*/ -static char *rsxx_card_state_to_str(unsigned int state) +static const char * const rsxx_card_state_to_str(unsigned int state) { - static char *state_strings[] = { + static const char * const state_strings[] = { "Unknown", "Shutdown", "Starting", "Formatting", "Uninitialized", "Good", "Shutting Down", "Fault", "Read Only Fault", "dStroying" @@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card) return 0; } +static int rsxx_eeh_frozen(struct pci_dev *dev) +{ + struct rsxx_cardinfo *card = pci_get_drvdata(dev); + int i; + int st; + + dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); + + card->eeh_state = 1; + rsxx_mask_interrupts(card); + + /* + * We need to guarantee that the write for eeh_state and masking + * interrupts does not become reordered. This will prevent a possible + * race condition with the EEH code. 
+ */ + wmb(); + + pci_disable_device(dev); + + st = rsxx_eeh_save_issued_dmas(card); + if (st) + return st; + + rsxx_eeh_save_issued_creg(card); + + for (i = 0; i < card->n_targets; i++) { + if (card->ctrl[i].status.buf) + pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8, + card->ctrl[i].status.buf, + card->ctrl[i].status.dma_addr); + if (card->ctrl[i].cmd.buf) + pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8, + card->ctrl[i].cmd.buf, + card->ctrl[i].cmd.dma_addr); + } + + return 0; +} + +static void rsxx_eeh_failure(struct pci_dev *dev) +{ + struct rsxx_cardinfo *card = pci_get_drvdata(dev); + int i; + + dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); + + card->eeh_state = 1; + + for (i = 0; i < card->n_targets; i++) + del_timer_sync(&card->ctrl[i].activity_timer); + + rsxx_eeh_cancel_dmas(card); +} + +static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) +{ + unsigned int status; + int iter = 0; + + /* We need to wait for the hardware to reset */ + while (iter++ < 10) { + status = ioread32(card->regmap + PCI_RECONFIG); + + if (status & RSXX_FLUSH_BUSY) { + ssleep(1); + continue; + } + + if (status & RSXX_FLUSH_TIMEOUT) + dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n"); + return 0; + } + + /* Hardware failed resetting itself. */ + return -1; +} + +static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev, + enum pci_channel_state error) +{ + int st; + + if (dev->revision < RSXX_EEH_SUPPORT) + return PCI_ERS_RESULT_NONE; + + if (error == pci_channel_io_perm_failure) { + rsxx_eeh_failure(dev); + return PCI_ERS_RESULT_DISCONNECT; + } + + st = rsxx_eeh_frozen(dev); + if (st) { + dev_err(&dev->dev, "Slot reset setup failed\n"); + rsxx_eeh_failure(dev); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) +{ + struct rsxx_cardinfo *card = pci_get_drvdata(dev); + unsigned long flags; + int i; + int st; + + dev_warn(&dev->dev, + "IBM FlashSystem PCI: recovering from slot reset.\n"); + + st = pci_enable_device(dev); + if (st) + goto failed_hw_setup; + + pci_set_master(dev); + + st = rsxx_eeh_fifo_flush_poll(card); + if (st) + goto failed_hw_setup; + + rsxx_dma_queue_reset(card); + + for (i = 0; i < card->n_targets; i++) { + st = rsxx_hw_buffers_init(dev, &card->ctrl[i]); + if (st) + goto failed_hw_buffers_init; + } + + if (card->config_valid) + rsxx_dma_configure(card); + + /* Clears the ISR register from spurious interrupts */ + st = ioread32(card->regmap + ISR); + + card->eeh_state = 0; + + st = rsxx_eeh_remap_dmas(card); + if (st) + goto failed_remap_dmas; + + spin_lock_irqsave(&card->irq_lock, flags); + if (card->n_targets & RSXX_MAX_TARGETS) + rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); + else + rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C); + spin_unlock_irqrestore(&card->irq_lock, flags); + + rsxx_kick_creg_queue(card); + + for (i = 0; i < card->n_targets; i++) { + spin_lock(&card->ctrl[i].queue_lock); + if (list_empty(&card->ctrl[i].queue)) { + spin_unlock(&card->ctrl[i].queue_lock); + continue; + } + spin_unlock(&card->ctrl[i].queue_lock); + + queue_work(card->ctrl[i].issue_wq, + &card->ctrl[i].issue_dma_work); + } + + dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); + + return PCI_ERS_RESULT_RECOVERED; + +failed_hw_buffers_init: +failed_remap_dmas: + for (i = 0; i < card->n_targets; i++) { + if (card->ctrl[i].status.buf) + pci_free_consistent(card->dev, + STATUS_BUFFER_SIZE8, + card->ctrl[i].status.buf, + 
card->ctrl[i].status.dma_addr); + if (card->ctrl[i].cmd.buf) + pci_free_consistent(card->dev, + COMMAND_BUFFER_SIZE8, + card->ctrl[i].cmd.buf, + card->ctrl[i].cmd.dma_addr); + } +failed_hw_setup: + rsxx_eeh_failure(dev); + return PCI_ERS_RESULT_DISCONNECT; + +} + /*----------------- Driver Initialization & Setup -------------------*/ /* Returns: 0 if the driver is compatible with the device -1 if the driver is NOT compatible with the device */ @@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, spin_lock_init(&card->irq_lock); card->halt = 0; + card->eeh_state = 0; spin_lock_irq(&card->irq_lock); rsxx_disable_ier_and_isr(card, CR_INTR_ALL); @@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev) rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); spin_unlock_irqrestore(&card->irq_lock, flags); - /* Prevent work_structs from re-queuing themselves. */ - card->halt = 1; - cancel_work_sync(&card->event_work); rsxx_destroy_dev(card); @@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev) spin_lock_irqsave(&card->irq_lock, flags); rsxx_disable_ier_and_isr(card, CR_INTR_ALL); spin_unlock_irqrestore(&card->irq_lock, flags); + + /* Prevent work_structs from re-queuing themselves. */ + card->halt = 1; + free_irq(dev->irq, card); if (!force_legacy) @@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev) card_shutdown(card); } +static const struct pci_error_handlers rsxx_err_handler = { + .error_detected = rsxx_error_detected, + .slot_reset = rsxx_slot_reset, +}; + static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)}, - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)}, + {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)}, + {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)}, {0,}, }; @@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = { .remove = rsxx_pci_remove, .suspend = rsxx_pci_suspend, .shutdown = rsxx_pci_shutdown, + .err_handler = &rsxx_err_handler, }; static int __init rsxx_core_init(void) diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c index 80bbe639fccd..4b5c020a0a65 100644 --- a/drivers/block/rsxx/cregs.c +++ b/drivers/block/rsxx/cregs.c @@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool; #error Unknown endianess!!! Aborting... 
#endif -static void copy_to_creg_data(struct rsxx_cardinfo *card, +static int copy_to_creg_data(struct rsxx_cardinfo *card, int cnt8, void *buf, unsigned int stream) @@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card, int i = 0; u32 *data = buf; + if (unlikely(card->eeh_state)) + return -EIO; + for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { /* * Firmware implementation makes it necessary to byte swap on @@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card, else iowrite32(data[i], card->regmap + CREG_DATA(i)); } + + return 0; } -static void copy_from_creg_data(struct rsxx_cardinfo *card, +static int copy_from_creg_data(struct rsxx_cardinfo *card, int cnt8, void *buf, unsigned int stream) @@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card, int i = 0; u32 *data = buf; + if (unlikely(card->eeh_state)) + return -EIO; + for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { /* * Firmware implementation makes it necessary to byte swap on @@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card, else data[i] = ioread32(card->regmap + CREG_DATA(i)); } -} - -static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card) -{ - struct creg_cmd *cmd; - /* - * Spin lock is needed because this can be called in atomic/interrupt - * context. - */ - spin_lock_bh(&card->creg_ctrl.lock); - cmd = card->creg_ctrl.active_cmd; - card->creg_ctrl.active_cmd = NULL; - spin_unlock_bh(&card->creg_ctrl.lock); - - return cmd; + return 0; } static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) { + int st; + + if (unlikely(card->eeh_state)) + return; + iowrite32(cmd->addr, card->regmap + CREG_ADD); iowrite32(cmd->cnt8, card->regmap + CREG_CNT); if (cmd->op == CREG_OP_WRITE) { - if (cmd->buf) - copy_to_creg_data(card, cmd->cnt8, - cmd->buf, cmd->stream); + if (cmd->buf) { + st = copy_to_creg_data(card, cmd->cnt8, + cmd->buf, cmd->stream); + if (st) + return; + } } - /* - * Data copy must complete before initiating the command. This is - * needed for weakly ordered processors (i.e. PowerPC), so that all - * neccessary registers are written before we kick the hardware. - */ - wmb(); + if (unlikely(card->eeh_state)) + return; /* Setting the valid bit will kick off the command. 
*/ iowrite32(cmd->op, card->regmap + CREG_CMD); @@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card, cmd->cb_private = cb_private; cmd->status = 0; - spin_lock(&card->creg_ctrl.lock); + spin_lock_bh(&card->creg_ctrl.lock); list_add_tail(&cmd->list, &card->creg_ctrl.queue); card->creg_ctrl.q_depth++; creg_kick_queue(card); - spin_unlock(&card->creg_ctrl.lock); + spin_unlock_bh(&card->creg_ctrl.lock); return 0; } @@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data) struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; struct creg_cmd *cmd; - cmd = pop_active_cmd(card); + spin_lock(&card->creg_ctrl.lock); + cmd = card->creg_ctrl.active_cmd; + card->creg_ctrl.active_cmd = NULL; + spin_unlock(&card->creg_ctrl.lock); + if (cmd == NULL) { card->creg_ctrl.creg_stats.creg_timeout++; dev_warn(CARD_TO_DEV(card), @@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work) if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) card->creg_ctrl.creg_stats.failed_cancel_timer++; - cmd = pop_active_cmd(card); + spin_lock_bh(&card->creg_ctrl.lock); + cmd = card->creg_ctrl.active_cmd; + card->creg_ctrl.active_cmd = NULL; + spin_unlock_bh(&card->creg_ctrl.lock); + if (cmd == NULL) { dev_err(CARD_TO_DEV(card), "Spurious creg interrupt!\n"); @@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work) goto creg_done; } - copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); + st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); } creg_done: @@ -296,10 +302,10 @@ creg_done: kmem_cache_free(creg_cmd_pool, cmd); - spin_lock(&card->creg_ctrl.lock); + spin_lock_bh(&card->creg_ctrl.lock); card->creg_ctrl.active = 0; creg_kick_queue(card); - spin_unlock(&card->creg_ctrl.lock); + spin_unlock_bh(&card->creg_ctrl.lock); } static void creg_reset(struct rsxx_cardinfo *card) @@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card) "Resetting creg interface for recovery\n"); /* Cancel outstanding commands */ - spin_lock(&card->creg_ctrl.lock); + spin_lock_bh(&card->creg_ctrl.lock); list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { list_del(&cmd->list); card->creg_ctrl.q_depth--; @@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card) card->creg_ctrl.active = 0; } - spin_unlock(&card->creg_ctrl.lock); + spin_unlock_bh(&card->creg_ctrl.lock); card->creg_ctrl.reset = 0; spin_lock_irqsave(&card->irq_lock, flags); @@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card, return st; /* - * This timeout is neccessary for unresponsive hardware. The additional + * This timeout is necessary for unresponsive hardware. The additional * 20 seconds to used to guarantee that each cregs requests has time to * complete. */ - timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * - card->creg_ctrl.q_depth) + 20000); + timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC * + card->creg_ctrl.q_depth + 20000); /* * The creg interface is guaranteed to complete. 
It has a timeout @@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card, return 0; } +void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card) +{ + struct creg_cmd *cmd = NULL; + + cmd = card->creg_ctrl.active_cmd; + card->creg_ctrl.active_cmd = NULL; + + if (cmd) { + del_timer_sync(&card->creg_ctrl.cmd_timer); + + spin_lock_bh(&card->creg_ctrl.lock); + list_add(&cmd->list, &card->creg_ctrl.queue); + card->creg_ctrl.q_depth++; + card->creg_ctrl.active = 0; + spin_unlock_bh(&card->creg_ctrl.lock); + } +} + +void rsxx_kick_creg_queue(struct rsxx_cardinfo *card) +{ + spin_lock_bh(&card->creg_ctrl.lock); + if (!list_empty(&card->creg_ctrl.queue)) + creg_kick_queue(card); + spin_unlock_bh(&card->creg_ctrl.lock); +} + /*------------ Initialization & Setup --------------*/ int rsxx_creg_setup(struct rsxx_cardinfo *card) { @@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card) int cnt = 0; /* Cancel outstanding commands */ - spin_lock(&card->creg_ctrl.lock); + spin_lock_bh(&card->creg_ctrl.lock); list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { list_del(&cmd->list); if (cmd->cb) @@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card) "Canceled active creg command\n"); kmem_cache_free(creg_cmd_pool, cmd); } - spin_unlock(&card->creg_ctrl.lock); + spin_unlock_bh(&card->creg_ctrl.lock); cancel_work_sync(&card->creg_ctrl.done_work); } diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 63176e67662f..0607513cfb41 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -28,7 +28,7 @@ struct rsxx_dma { struct list_head list; u8 cmd; - unsigned int laddr; /* Logical address on the ramsan */ + unsigned int laddr; /* Logical address */ struct { u32 off; u32 cnt; @@ -81,9 +81,6 @@ enum rsxx_hw_status { HW_STATUS_FAULT = 0x08, }; -#define STATUS_BUFFER_SIZE8 4096 -#define COMMAND_BUFFER_SIZE8 4096 - static struct kmem_cache *rsxx_dma_pool; struct dma_tracker { @@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8) return tgt; } -static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) +void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) { /* Reset all DMA Command/Status Queues */ iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); @@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) u32 q_depth = 0; u32 intr_coal; - if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) + if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE || + unlikely(card->eeh_state)) return; for (i = 0; i < card->n_targets; i++) @@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) } /*----------------- RSXX DMA Handling -------------------*/ -static void rsxx_complete_dma(struct rsxx_cardinfo *card, +static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma, unsigned int status) { if (status & DMA_SW_ERR) - printk_ratelimited(KERN_ERR - "SW Error in DMA(cmd x%02x, laddr x%08x)\n", - dma->cmd, dma->laddr); + ctrl->stats.dma_sw_err++; if (status & DMA_HW_FAULT) - printk_ratelimited(KERN_ERR - "HW Fault in DMA(cmd x%02x, laddr x%08x)\n", - dma->cmd, dma->laddr); + ctrl->stats.dma_hw_fault++; if (status & DMA_CANCELLED) - printk_ratelimited(KERN_ERR - "DMA Cancelled(cmd x%02x, laddr x%08x)\n", - dma->cmd, dma->laddr); + ctrl->stats.dma_cancelled++; if (dma->dma_addr) - pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), + pci_unmap_page(ctrl->card->dev, dma->dma_addr, + 
get_dma_size(dma), dma->cmd == HW_CMD_BLK_WRITE ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (dma->cb) - dma->cb(card, dma->cb_data, status ? 1 : 0); + dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); kmem_cache_free(rsxx_dma_pool, dma); } @@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, if (requeue_cmd) rsxx_requeue_dma(ctrl, dma); else - rsxx_complete_dma(ctrl->card, dma, status); + rsxx_complete_dma(ctrl, dma, status); } static void dma_engine_stalled(unsigned long data) { struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; - if (atomic_read(&ctrl->stats.hw_q_depth) == 0) + if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || + unlikely(ctrl->card->eeh_state)) return; if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { @@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work) ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); hw_cmd_buf = ctrl->cmd.buf; - if (unlikely(ctrl->card->halt)) + if (unlikely(ctrl->card->halt) || + unlikely(ctrl->card->eeh_state)) return; while (1) { @@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work) */ if (unlikely(ctrl->card->dma_fault)) { push_tracker(ctrl->trackers, tag); - rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); + rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); continue; } @@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work) /* Let HW know we've queued commands. */ if (cmds_pending) { - /* - * We must guarantee that the CPU writes to 'ctrl->cmd.buf' - * (which is in PCI-consistent system-memory) from the loop - * above make it into the coherency domain before the - * following PIO "trigger" updating the cmd.idx. A WMB is - * sufficient. We need not explicitly CPU cache-flush since - * the memory is a PCI-consistent (ie; coherent) mapping. 
- */ - wmb(); - atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); mod_timer(&ctrl->activity_timer, jiffies + DMA_ACTIVITY_TIMEOUT); + + if (unlikely(ctrl->card->eeh_state)) { + del_timer_sync(&ctrl->activity_timer); + return; + } + iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); } } @@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work) hw_st_buf = ctrl->status.buf; if (unlikely(ctrl->card->halt) || - unlikely(ctrl->card->dma_fault)) + unlikely(ctrl->card->dma_fault) || + unlikely(ctrl->card->eeh_state)) return; count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); @@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work) if (status) rsxx_handle_dma_error(ctrl, dma, status); else - rsxx_complete_dma(ctrl->card, dma, 0); + rsxx_complete_dma(ctrl, dma, 0); push_tracker(ctrl->trackers, tag); @@ -727,20 +719,54 @@ bvec_err: /*----------------- DMA Engine Initialization & Setup -------------------*/ +int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl) +{ + ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, + &ctrl->status.dma_addr); + ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, + &ctrl->cmd.dma_addr); + if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) + return -ENOMEM; + + memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); + iowrite32(lower_32_bits(ctrl->status.dma_addr), + ctrl->regmap + SB_ADD_LO); + iowrite32(upper_32_bits(ctrl->status.dma_addr), + ctrl->regmap + SB_ADD_HI); + + memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); + iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); + iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); + + ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); + if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { + dev_crit(&dev->dev, "Failed reading status cnt x%x\n", + ctrl->status.idx); + return -EINVAL; + } + iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); + iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); + + ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); + if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { + dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", + ctrl->status.idx); + return -EINVAL; + } + iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); + iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); + + return 0; +} + static int rsxx_dma_ctrl_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl) { int i; + int st; memset(&ctrl->stats, 0, sizeof(ctrl->stats)); - ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, - &ctrl->status.dma_addr); - ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, - &ctrl->cmd.dma_addr); - if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) - return -ENOMEM; - ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); if (!ctrl->trackers) return -ENOMEM; @@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev, INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); - memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); - iowrite32(lower_32_bits(ctrl->status.dma_addr), - ctrl->regmap + SB_ADD_LO); - iowrite32(upper_32_bits(ctrl->status.dma_addr), - ctrl->regmap + SB_ADD_HI); - - memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); - iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); - iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); - - ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); - if (ctrl->status.idx > 
RSXX_MAX_OUTSTANDING_CMDS) { - dev_crit(&dev->dev, "Failed reading status cnt x%x\n", - ctrl->status.idx); - return -EINVAL; - } - iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); - iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); - - ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); - if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { - dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", - ctrl->status.idx); - return -EINVAL; - } - iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); - iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); - - wmb(); + st = rsxx_hw_buffers_init(dev, ctrl); + if (st) + return st; return 0; } @@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card, return 0; } -static int rsxx_dma_configure(struct rsxx_cardinfo *card) +int rsxx_dma_configure(struct rsxx_cardinfo *card) { u32 intr_coal; @@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) } } +int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) +{ + int i; + int j; + int cnt; + struct rsxx_dma *dma; + struct list_head *issued_dmas; + + issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets, + GFP_KERNEL); + if (!issued_dmas) + return -ENOMEM; + + for (i = 0; i < card->n_targets; i++) { + INIT_LIST_HEAD(&issued_dmas[i]); + cnt = 0; + for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) { + dma = get_tracker_dma(card->ctrl[i].trackers, j); + if (dma == NULL) + continue; + + if (dma->cmd == HW_CMD_BLK_WRITE) + card->ctrl[i].stats.writes_issued--; + else if (dma->cmd == HW_CMD_BLK_DISCARD) + card->ctrl[i].stats.discards_issued--; + else + card->ctrl[i].stats.reads_issued--; + + list_add_tail(&dma->list, &issued_dmas[i]); + push_tracker(card->ctrl[i].trackers, j); + cnt++; + } + + spin_lock(&card->ctrl[i].queue_lock); + list_splice(&issued_dmas[i], &card->ctrl[i].queue); + + atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); + card->ctrl[i].stats.sw_q_depth += cnt; + card->ctrl[i].e_cnt = 0; + + list_for_each_entry(dma, &card->ctrl[i].queue, list) { + if (dma->dma_addr) + pci_unmap_page(card->dev, dma->dma_addr, + get_dma_size(dma), + dma->cmd == HW_CMD_BLK_WRITE ? + PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE); + } + spin_unlock(&card->ctrl[i].queue_lock); + } + + kfree(issued_dmas); + + return 0; +} + +void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card) +{ + struct rsxx_dma *dma; + struct rsxx_dma *tmp; + int i; + + for (i = 0; i < card->n_targets; i++) { + spin_lock(&card->ctrl[i].queue_lock); + list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { + list_del(&dma->list); + + rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); + } + spin_unlock(&card->ctrl[i].queue_lock); + } +} + +int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) +{ + struct rsxx_dma *dma; + int i; + + for (i = 0; i < card->n_targets; i++) { + spin_lock(&card->ctrl[i].queue_lock); + list_for_each_entry(dma, &card->ctrl[i].queue, list) { + dma->dma_addr = pci_map_page(card->dev, dma->page, + dma->pg_off, get_dma_size(dma), + dma->cmd == HW_CMD_BLK_WRITE ? 
+ PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE); + if (!dma->dma_addr) { + spin_unlock(&card->ctrl[i].queue_lock); + kmem_cache_free(rsxx_dma_pool, dma); + return -ENOMEM; + } + } + spin_unlock(&card->ctrl[i].queue_lock); + } + + return 0; +} int rsxx_dma_init(void) { diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h index 2e50b65902b7..24ba3642bd89 100644 --- a/drivers/block/rsxx/rsxx.h +++ b/drivers/block/rsxx/rsxx.h @@ -27,15 +27,17 @@ /*----------------- IOCTL Definitions -------------------*/ +#define RSXX_MAX_DATA 8 + struct rsxx_reg_access { __u32 addr; __u32 cnt; __u32 stat; __u32 stream; - __u32 data[8]; + __u32 data[RSXX_MAX_DATA]; }; -#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) +#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32))) #define RSXX_IOC_MAGIC 'r' diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h index c025fe5fdb70..f384c943846d 100644 --- a/drivers/block/rsxx/rsxx_cfg.h +++ b/drivers/block/rsxx/rsxx_cfg.h @@ -58,7 +58,7 @@ struct rsxx_card_cfg { }; /* Vendor ID Values */ -#define RSXX_VENDOR_ID_TMS_IBM 0 +#define RSXX_VENDOR_ID_IBM 0 #define RSXX_VENDOR_ID_DSI 1 #define RSXX_VENDOR_COUNT 2 diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index a1ac907d8f4c..382e8bf5c03b 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h @@ -45,16 +45,13 @@ struct proc_cmd; -#define PCI_VENDOR_ID_TMS_IBM 0x15B6 -#define PCI_DEVICE_ID_RS70_FLASH 0x0019 -#define PCI_DEVICE_ID_RS70D_FLASH 0x001A -#define PCI_DEVICE_ID_RS80_FLASH 0x001C -#define PCI_DEVICE_ID_RS81_FLASH 0x001E +#define PCI_DEVICE_ID_FS70_FLASH 0x04A9 +#define PCI_DEVICE_ID_FS80_FLASH 0x04AA #define RS70_PCI_REV_SUPPORTED 4 #define DRIVER_NAME "rsxx" -#define DRIVER_VERSION "3.7" +#define DRIVER_VERSION "4.0" /* Block size is 4096 */ #define RSXX_HW_BLK_SHIFT 12 @@ -67,6 +64,9 @@ struct proc_cmd; #define RSXX_MAX_OUTSTANDING_CMDS 255 #define RSXX_CS_IDX_MASK 0xff +#define STATUS_BUFFER_SIZE8 4096 +#define COMMAND_BUFFER_SIZE8 4096 + #define RSXX_MAX_TARGETS 8 struct dma_tracker_list; @@ -91,6 +91,9 @@ struct rsxx_dma_stats { u32 discards_failed; u32 done_rescheduled; u32 issue_rescheduled; + u32 dma_sw_err; + u32 dma_hw_fault; + u32 dma_cancelled; u32 sw_q_depth; /* Number of DMAs on the SW queue. */ atomic_t hw_q_depth; /* Number of DMAs queued to HW. 
*/ }; @@ -116,6 +119,7 @@ struct rsxx_dma_ctrl { struct rsxx_cardinfo { struct pci_dev *dev; unsigned int halt; + unsigned int eeh_state; void __iomem *regmap; spinlock_t irq_lock; @@ -224,6 +228,7 @@ enum rsxx_pci_regmap { PERF_RD512_HI = 0xac, PERF_WR512_LO = 0xb0, PERF_WR512_HI = 0xb4, + PCI_RECONFIG = 0xb8, }; enum rsxx_intr { @@ -237,6 +242,8 @@ enum rsxx_intr { CR_INTR_DMA5 = 0x00000080, CR_INTR_DMA6 = 0x00000100, CR_INTR_DMA7 = 0x00000200, + CR_INTR_ALL_C = 0x0000003f, + CR_INTR_ALL_G = 0x000003ff, CR_INTR_DMA_ALL = 0x000003f5, CR_INTR_ALL = 0xffffffff, }; @@ -253,8 +260,14 @@ enum rsxx_pci_reset { DMA_QUEUE_RESET = 0x00000001, }; +enum rsxx_hw_fifo_flush { + RSXX_FLUSH_BUSY = 0x00000002, + RSXX_FLUSH_TIMEOUT = 0x00000004, +}; + enum rsxx_pci_revision { RSXX_DISCARD_SUPPORT = 2, + RSXX_EEH_SUPPORT = 3, }; enum rsxx_creg_cmd { @@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card); void rsxx_dma_destroy(struct rsxx_cardinfo *card); int rsxx_dma_init(void); void rsxx_dma_cleanup(void); +void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); +int rsxx_dma_configure(struct rsxx_cardinfo *card); int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, struct bio *bio, atomic_t *n_dmas, rsxx_dma_cb cb, void *cb_data); +int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); +int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); +void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card); +int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); /***** cregs.c *****/ int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, @@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card); void rsxx_creg_destroy(struct rsxx_cardinfo *card); int rsxx_creg_init(void); void rsxx_creg_cleanup(void); - int rsxx_reg_access(struct rsxx_cardinfo *card, struct rsxx_reg_access __user *ucmd, int read); +void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card); +void rsxx_kick_creg_queue(struct rsxx_cardinfo *card); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index de1f319f7bd7..dd5b2fed97e9 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, #define foreach_grant_safe(pos, n, rbtree, node) \ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ - (n) = rb_next(&(pos)->node); \ + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ &(pos)->node != NULL; \ (pos) = container_of(n, typeof(*(pos)), node), \ (n) = (&(pos)->node != NULL) ? 
rb_next(&(pos)->node) : NULL) @@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) static void print_stats(struct xen_blkif *blkif) { - pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" - " | ds %4d\n", + pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" + " | ds %4llu\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req, blkif->st_ds_req); @@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg) } struct seg_buf { - unsigned long buf; + unsigned int offset; unsigned int nsec; }; /* @@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req, * If this is a new persistent grant * save the handler */ - persistent_gnts[i]->handle = map[j].handle; - persistent_gnts[i]->dev_bus_addr = - map[j++].dev_bus_addr; + persistent_gnts[i]->handle = map[j++].handle; } pending_handle(pending_req, i) = persistent_gnts[i]->handle; if (ret) continue; - - seg[i].buf = persistent_gnts[i]->dev_bus_addr | - (req->u.rw.seg[i].first_sect << 9); } else { - pending_handle(pending_req, i) = map[j].handle; + pending_handle(pending_req, i) = map[j++].handle; bitmap_set(pending_req->unmap_seg, i, 1); - if (ret) { - j++; + if (ret) continue; - } - - seg[i].buf = map[j++].dev_bus_addr | - (req->u.rw.seg[i].first_sect << 9); } + seg[i].offset = (req->u.rw.seg[i].first_sect << 9); } return ret; } @@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif, return err; } +static int dispatch_other_io(struct xen_blkif *blkif, + struct blkif_request *req, + struct pending_req *pending_req) +{ + free_req(pending_req); + make_response(blkif, req->u.other.id, req->operation, + BLKIF_RSP_EOPNOTSUPP); + return -EIO; +} + static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); @@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif) /* Apply all sanity checks to /private copy/ of request. */ barrier(); - if (unlikely(req.operation == BLKIF_OP_DISCARD)) { + + switch (req.operation) { + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + case BLKIF_OP_WRITE_BARRIER: + case BLKIF_OP_FLUSH_DISKCACHE: + if (dispatch_rw_block_io(blkif, &req, pending_req)) + goto done; + break; + case BLKIF_OP_DISCARD: free_req(pending_req); if (dispatch_discard_io(blkif, &req)) - break; - } else if (dispatch_rw_block_io(blkif, &req, pending_req)) + goto done; break; + default: + if (dispatch_other_io(blkif, &req, pending_req)) + goto done; + break; + } /* Yield point for this unbounded loop. */ cond_resched(); } - +done: return more_to_do; } @@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, - preq.sector_number + preq.nr_sects, preq.dev); + preq.sector_number + preq.nr_sects, + blkif->vbd.pdevice); goto fail_response; } @@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, (bio_add_page(bio, pages[i], seg[i].nsec << 9, - seg[i].buf & ~PAGE_MASK) == 0)) { + seg[i].offset) == 0)) { bio = bio_alloc(GFP_KERNEL, nseg-i); if (unlikely(bio == NULL)) @@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, bio->bi_end_io = end_block_io_op; } - /* - * We set it one so that the last submit_bio does not have to call - * atomic_inc. 
- */ atomic_set(&pending_req->pendcnt, nbio); - - /* Get a reference count for the disk queue and start sending I/O */ blk_start_plug(&plug); for (i = 0; i < nbio; i++) @@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); + atomic_set(&pending_req->pendcnt, 1); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 6072390c7f57..60103e2517ba 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard { uint64_t nr_sectors; } __attribute__((__packed__)); +struct blkif_x86_32_request_other { + uint8_t _pad1; + blkif_vdev_t _pad2; + uint64_t id; /* private guest value, echoed in resp */ +} __attribute__((__packed__)); + struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_x86_32_request_rw rw; struct blkif_x86_32_request_discard discard; + struct blkif_x86_32_request_other other; } u; } __attribute__((__packed__)); @@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard { uint64_t nr_sectors; } __attribute__((__packed__)); +struct blkif_x86_64_request_other { + uint8_t _pad1; + blkif_vdev_t _pad2; + uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ + uint64_t id; /* private guest value, echoed in resp */ +} __attribute__((__packed__)); + struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_x86_64_request_rw rw; struct blkif_x86_64_request_discard discard; + struct blkif_x86_64_request_other other; } u; } __attribute__((__packed__)); @@ -172,7 +187,6 @@ struct persistent_gnt { struct page *page; grant_ref_t gnt; grant_handle_t handle; - uint64_t dev_bus_addr; struct rb_node node; }; @@ -208,13 +222,13 @@ struct xen_blkif { /* statistics */ unsigned long st_print; - int st_rd_req; - int st_wr_req; - int st_oo_req; - int st_f_req; - int st_ds_req; - int st_rd_sect; - int st_wr_sect; + unsigned long long st_rd_req; + unsigned long long st_wr_req; + unsigned long long st_oo_req; + unsigned long long st_f_req; + unsigned long long st_ds_req; + unsigned long long st_rd_sect; + unsigned long long st_wr_sect; wait_queue_head_t waiting_to_free; }; @@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; default: + /* + * Don't know how to translate this op. Only get the + * ID so failure can be reported to the frontend. + */ + dst->u.other.id = src->u.other.id; break; } } @@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; default: + /* + * Don't know how to translate this op. Only get the + * ID so failure can be reported to the frontend. 
+ */ + dst->u.other.id = src->u.other.id; break; } } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 5e237f630c47..8bfd1bcf95ec 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void) } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) -VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); -VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); -VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); -VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); -VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); -VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); -VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); +VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req); +VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req); +VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req); +VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req); +VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req); +VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect); +VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect); static struct attribute *xen_vbdstat_attrs[] = { &dev_attr_oo_req.attr, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c3dae2e0f290..a894f88762d8 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -44,7 +44,7 @@ #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/bitmap.h> -#include <linux/llist.h> +#include <linux/list.h> #include <xen/xen.h> #include <xen/xenbus.h> @@ -68,13 +68,12 @@ enum blkif_state { struct grant { grant_ref_t gref; unsigned long pfn; - struct llist_node node; + struct list_head node; }; struct blk_shadow { struct blkif_request req; struct request *request; - unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; @@ -105,7 +104,7 @@ struct blkfront_info struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; - struct llist_head persistent_gnts; + struct list_head persistent_gnts; unsigned int persistent_gnts_c; unsigned long shadow_free; unsigned int feature_flush; @@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info, return 0; } +static int fill_grant_buffer(struct blkfront_info *info, int num) +{ + struct page *granted_page; + struct grant *gnt_list_entry, *n; + int i = 0; + + while(i < num) { + gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); + if (!gnt_list_entry) + goto out_of_memory; + + granted_page = alloc_page(GFP_NOIO); + if (!granted_page) { + kfree(gnt_list_entry); + goto out_of_memory; + } + + gnt_list_entry->pfn = page_to_pfn(granted_page); + gnt_list_entry->gref = GRANT_INVALID_REF; + list_add(&gnt_list_entry->node, &info->persistent_gnts); + i++; + } + + return 0; + +out_of_memory: + list_for_each_entry_safe(gnt_list_entry, n, + &info->persistent_gnts, node) { + list_del(&gnt_list_entry->node); + __free_page(pfn_to_page(gnt_list_entry->pfn)); + kfree(gnt_list_entry); + i--; + } + BUG_ON(i != 0); + return -ENOMEM; +} + +static struct grant *get_grant(grant_ref_t *gref_head, + struct blkfront_info *info) +{ + struct grant *gnt_list_entry; + unsigned long buffer_mfn; + + BUG_ON(list_empty(&info->persistent_gnts)); + gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, + node); + list_del(&gnt_list_entry->node); + + if (gnt_list_entry->gref != GRANT_INVALID_REF) { + info->persistent_gnts_c--; + return gnt_list_entry; + } + + /* Assign a gref to this page 
*/ + gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); + BUG_ON(gnt_list_entry->gref == -ENOSPC); + buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); + gnttab_grant_foreign_access_ref(gnt_list_entry->gref, + info->xbdev->otherend_id, + buffer_mfn, 0); + return gnt_list_entry; +} + static const char *op_name(int op) { static const char *const names[] = { @@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; - unsigned long buffer_mfn; struct blkif_request *ring_req; unsigned long id; unsigned int fsect, lsect; @@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req) */ bool new_persistent_gnts; grant_ref_t gref_head; - struct page *granted_page; struct grant *gnt_list_entry = NULL; struct scatterlist *sg; @@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req) fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; - if (info->persistent_gnts_c) { - BUG_ON(llist_empty(&info->persistent_gnts)); - gnt_list_entry = llist_entry( - llist_del_first(&info->persistent_gnts), - struct grant, node); - - ref = gnt_list_entry->gref; - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); - info->persistent_gnts_c--; - } else { - ref = gnttab_claim_grant_reference(&gref_head); - BUG_ON(ref == -ENOSPC); - - gnt_list_entry = - kmalloc(sizeof(struct grant), - GFP_ATOMIC); - if (!gnt_list_entry) - return -ENOMEM; - - granted_page = alloc_page(GFP_ATOMIC); - if (!granted_page) { - kfree(gnt_list_entry); - return -ENOMEM; - } - - gnt_list_entry->pfn = - page_to_pfn(granted_page); - gnt_list_entry->gref = ref; - - buffer_mfn = pfn_to_mfn(page_to_pfn( - granted_page)); - gnttab_grant_foreign_access_ref(ref, - info->xbdev->otherend_id, - buffer_mfn, 0); - } + gnt_list_entry = get_grant(&gref_head, info); + ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; @@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req) kunmap_atomic(shared_data); } - info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, @@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work) static void blkif_free(struct blkfront_info *info, int suspend) { - struct llist_node *all_gnts; - struct grant *persistent_gnt, *tmp; - struct llist_node *n; + struct grant *persistent_gnt; + struct grant *n; /* Prevent new requests being issued until we fix things up. 
*/ spin_lock_irq(&info->io_lock); @@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend) blk_stop_queue(info->rq); /* Remove all persistent grants */ - if (info->persistent_gnts_c) { - all_gnts = llist_del_all(&info->persistent_gnts); - persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); - while (persistent_gnt) { - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); + if (!list_empty(&info->persistent_gnts)) { + list_for_each_entry_safe(persistent_gnt, n, + &info->persistent_gnts, node) { + list_del(&persistent_gnt->node); + if (persistent_gnt->gref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(persistent_gnt->gref, + 0, 0UL); + info->persistent_gnts_c--; + } __free_page(pfn_to_page(persistent_gnt->pfn)); - tmp = persistent_gnt; - n = persistent_gnt->node.next; - if (n) - persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node); - else - persistent_gnt = NULL; - kfree(tmp); + kfree(persistent_gnt); } - info->persistent_gnts_c = 0; } + BUG_ON(info->persistent_gnts_c != 0); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); @@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < s->req.u.rw.nr_segments; i++) { - llist_add(&s->grants_used[i]->node, &info->persistent_gnts); + list_add(&s->grants_used[i]->node, &info->persistent_gnts); info->persistent_gnts_c++; } } @@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev, sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + /* Allocate memory for grants */ + err = fill_grant_buffer(info, BLK_RING_SIZE * + BLKIF_MAX_SEGMENTS_PER_REQUEST); + if (err) + goto fail; + err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); @@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev, spin_lock_init(&info->io_lock); info->xbdev = dev; info->vdevice = vdevice; - init_llist_head(&info->persistent_gnts); + INIT_LIST_HEAD(&info->persistent_gnts); info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); @@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info) int j; /* Stage 1: Make a safe copy of the shadow state. */ - copy = kmalloc(sizeof(info->shadow), + copy = kmemdup(info->shadow, sizeof(info->shadow), GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); if (!copy) return -ENOMEM; - memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. 
*/ memset(&info->shadow, 0, sizeof(info->shadow)); @@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info) gnttab_grant_foreign_access_ref( req->u.rw.seg[j].gref, info->xbdev->otherend_id, - pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), + pfn_to_mfn(copy[i].grants_used[j]->pfn), 0); } info->shadow[req->u.rw.id].req = *req; diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a8a41e07a221..6aab00ef4379 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -73,9 +73,13 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, + { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, + { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, { USB_DEVICE(0x04CA, 0x3008) }, @@ -105,9 +109,13 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7e351e345476..2cc5f774a29c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -131,9 +131,13 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ + { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e3f9a99b8522..d784650d14f0 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) struct hpet_dev *devp; unsigned long addr; - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) - return -EINVAL; - devp = file->private_data; addr = devp->hd_hpets->hp_hpet_phys; if (addr & (PAGE_SIZE - 1)) return -ENOSYS; - vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) { - printk(KERN_ERR "%s: io_remap_pfn_range failed\n", - __func__); - return -EAGAIN; - } - - return 0; + return vm_iomap_memory(vma, addr, PAGE_SIZE); #else 
return -ENOSYS; #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713c..a0f7724852eb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng) } EXPORT_SYMBOL_GPL(hwrng_unregister); +static void __exit hwrng_exit(void) +{ + mutex_lock(&rng_mutex); + BUG_ON(current_rng); + kfree(rng_buffer); + mutex_unlock(&rng_mutex); +} + +module_exit(hwrng_exit); MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 10fd71ccf587..6bf4d47324eb 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -92,14 +92,22 @@ static int probe_common(struct virtio_device *vdev) { int err; + if (vq) { + /* We only support one device for now */ + return -EBUSY; + } /* We expect a single virtqueue. */ vq = virtio_find_single_vq(vdev, random_recv_done, "input"); - if (IS_ERR(vq)) - return PTR_ERR(vq); + if (IS_ERR(vq)) { + err = PTR_ERR(vq); + vq = NULL; + return err; + } err = hwrng_register(&virtio_hwrng); if (err) { vdev->config->del_vqs(vdev); + vq = NULL; return err; } @@ -112,6 +120,7 @@ static void remove_common(struct virtio_device *vdev) busy = false; hwrng_unregister(&virtio_hwrng); vdev->config->del_vqs(vdev); + vq = NULL; } static int virtrng_probe(struct virtio_device *vdev) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f53051..ce5f3fc25d6d 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -149,7 +149,8 @@ struct ports_device { spinlock_t ports_lock; /* To protect the vq operations for the control channel */ - spinlock_t cvq_lock; + spinlock_t c_ivq_lock; + spinlock_t c_ovq_lock; /* The current config space is stored here */ struct virtio_console_config config; @@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, vq = portdev->c_ovq; sg_init_one(sg, &cpkt, sizeof(cpkt)); + + spin_lock(&portdev->c_ovq_lock); if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { virtqueue_kick(vq); while (!virtqueue_get_buf(vq, &len)) cpu_relax(); } + spin_unlock(&portdev->c_ovq_lock); return 0; } @@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id) * rproc_serial does not want the console port, only * the generic port implementation. */ - port->host_connected = port->guest_connected = true; + port->host_connected = true; else if (!use_multiport(port->portdev)) { /* * If we're not using multiport support, @@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work) portdev = container_of(work, struct ports_device, control_work); vq = portdev->c_ivq; - spin_lock(&portdev->cvq_lock); + spin_lock(&portdev->c_ivq_lock); while ((buf = virtqueue_get_buf(vq, &len))) { - spin_unlock(&portdev->cvq_lock); + spin_unlock(&portdev->c_ivq_lock); buf->len = len; buf->offset = 0; handle_control_message(portdev, buf); - spin_lock(&portdev->cvq_lock); + spin_lock(&portdev->c_ivq_lock); if (add_inbuf(portdev->c_ivq, buf) < 0) { dev_warn(&portdev->vdev->dev, "Error adding buffer to queue\n"); free_buf(buf, false); } } - spin_unlock(&portdev->cvq_lock); + spin_unlock(&portdev->c_ivq_lock); } static void out_intr(struct virtqueue *vq) @@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq) port->inbuf = get_inbuf(port); /* - * Don't queue up data when port is closed. 
This condition + * Normally the port should not accept data when the port is + * closed. For generic serial ports, the host won't (shouldn't) + * send data till the guest is connected. But this condition * can be reached when a console port is not yet connected (no - * tty is spawned) and the host sends out data to console - * ports. For generic serial ports, the host won't - * (shouldn't) send data till the guest is connected. + * tty is spawned) and the other side sends out data over the + * vring, or when a remote devices start sending data before + * the ports are opened. + * + * A generic serial port will discard data if not connected, + * while console ports and rproc-serial ports accepts data at + * any time. rproc-serial is initiated with guest_connected to + * false because port_fops_open expects this. Console ports are + * hooked up with an HVC console and is initialized with + * guest_connected to true. */ - if (!port->guest_connected) + + if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) discard_port_data(port); spin_unlock_irqrestore(&port->inbuf_lock, flags); @@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev) if (multiport) { unsigned int nr_added_bufs; - spin_lock_init(&portdev->cvq_lock); + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); INIT_WORK(&portdev->control_work, &control_work_handler); - nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); + nr_added_bufs = fill_queue(portdev->c_ivq, + &portdev->c_ivq_lock); if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); @@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev) return ret; if (use_multiport(portdev)) - fill_queue(portdev->c_ivq, &portdev->cvq_lock); + fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); list_for_each_entry(port, &portdev->ports, list) { port->in_vq = portdev->in_vqs[port->id]; diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c index b5538bba7a10..09c63315e579 100644 --- a/drivers/clk/clk-vt8500.c +++ b/drivers/clk/clk-vt8500.c @@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, divisor = parent_rate / rate; /* If prate / rate would be decimal, incr the divisor */ - if (rate * divisor < *prate) + if (rate * divisor < parent_rate) divisor++; if (divisor == cdev->div_mask + 1) diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 143ce1f899ad..f873dcefe0de 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -703,7 +703,7 @@ static void tegra20_pll_init(void) clks[pll_a_out0] = clk; /* PLLE */ - clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, + clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, 0, 100000000, &pll_e_params, 0, pll_e_freq_table, NULL); clk_register_clkdev(clk, "pll_e", NULL); @@ -1292,7 +1292,6 @@ static struct tegra_clk_duplicate tegra_clk_duplicates[] = { TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL), TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL), TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"), - TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL), TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */ }; diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 32c61cb6d0bb..ba6f51bc9f3b 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -1931,7 +1931,6 @@ static struct tegra_clk_duplicate tegra_clk_duplicates[] = { 
TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL), TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"), TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"), - TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL), TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"), TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */ }; diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 937bc286591f..57a8774f0b4e 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { cpumask_copy(policy->cpus, perf->shared_cpu_map); } - cpumask_copy(policy->related_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); @@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); - cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu)); policy->shared_type = CPUFREQ_SHARED_TYPE_HW; pr_info_once(PFX "overriding BIOS provided _PSD data\n"); } diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { static int cpu0_cpufreq_probe(struct platform_device *pdev) { - struct device_node *np; + struct device_node *np, *parent; int ret; - for_each_child_of_node(of_find_node_by_path("/cpus"), np) { + parent = of_find_node_by_path("/cpus"); + if (!parent) { + pr_err("failed to find OF /cpus\n"); + return -ENOENT; + } + + for_each_child_of_node(parent, np) { if (of_get_property(np, "operating-points", NULL)) break; } diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. 
*/ -#ifndef _CPUFREQ_GOVERNER_H -#define _CPUFREQ_GOVERNER_H +#ifndef _CPUFREQ_GOVERNOR_H +#define _CPUFREQ_GOVERNOR_H #include <linux/cpufreq.h> #include <linux/kobject.h> @@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate); int cpufreq_governor_dbs(struct dbs_data *dbs_data, struct cpufreq_policy *policy, unsigned int event); -#endif /* _CPUFREQ_GOVERNER_H */ +#endif /* _CPUFREQ_GOVERNOR_H */ diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 2fd779eb1ed1..bfd6273fd873 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (!cpufreq_frequency_get_table(cpu)) + if (!policy) return; - if (policy && !policy_is_shared(policy)) { + if (!cpufreq_frequency_get_table(cpu)) + goto put_ref; + + if (!policy_is_shared(policy)) { pr_debug("%s: Free sysfs stat\n", __func__); sysfs_remove_group(&policy->kobj, &stats_attr_group); } - if (policy) - cpufreq_cpu_put(policy); + +put_ref: + cpufreq_cpu_put(policy); } static int cpufreq_stats_create_table(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f6dd1e761129..6133ef5cf671 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void) static int intel_pstate_min_pstate(void) { u64 value; - rdmsrl(0xCE, value); + rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 40) & 0xFF; } static int intel_pstate_max_pstate(void) { u64 value; - rdmsrl(0xCE, value); + rdmsrl(MSR_PLATFORM_INFO, value); return (value >> 8) & 0xFF; } @@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void) { u64 value; int nont, ret; - rdmsrl(0x1AD, value); + rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); nont = intel_pstate_max_pstate(); ret = ((value) & 255); if (ret <= nont) @@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, sample->idletime_us * 100, sample->duration_us); core_pct = div64_u64(sample->aperf * 100, sample->mperf); - sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000; + sample->freq = cpu->pstate.max_pstate * core_pct * 1000; sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), 100); @@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) sample_time = cpu->pstate_policy->sample_rate_ms; delay = msecs_to_jiffies(sample_time); - delay -= jiffies % delay; mod_timer_pinned(&cpu->timer, jiffies + delay); } @@ -752,6 +751,29 @@ static struct cpufreq_driver intel_pstate_driver = { static int __initdata no_load; +static int intel_pstate_msrs_not_valid(void) +{ + /* Check that all the msr's we are using are valid. 
*/ + u64 aperf, mperf, tmp; + + rdmsrl(MSR_IA32_APERF, aperf); + rdmsrl(MSR_IA32_MPERF, mperf); + + if (!intel_pstate_min_pstate() || + !intel_pstate_max_pstate() || + !intel_pstate_turbo_pstate()) + return -ENODEV; + + rdmsrl(MSR_IA32_APERF, tmp); + if (!(tmp - aperf)) + return -ENODEV; + + rdmsrl(MSR_IA32_MPERF, tmp); + if (!(tmp - mperf)) + return -ENODEV; + + return 0; +} static int __init intel_pstate_init(void) { int cpu, rc = 0; @@ -764,6 +786,9 @@ static int __init intel_pstate_init(void) if (!id) return -ENODEV; + if (intel_pstate_msrs_not_valid()) + return -ENODEV; + pr_info("Intel P-state driver initializing.\n"); all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b2a0a0726a54..cf268b14ae9a 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1650,11 +1650,7 @@ struct caam_alg_template { }; static struct caam_alg_template driver_algs[] = { - /* - * single-pass ipsec_esp descriptor - * authencesn(*,*) is also registered, although not present - * explicitly here. - */ + /* single-pass ipsec_esp descriptor */ { .name = "authenc(hmac(md5),cbc(aes))", .driver_name = "authenc-hmac-md5-cbc-aes-caam", @@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { /* TODO: check if h/w supports alg */ struct caam_crypto_alg *t_alg; - bool done = false; -authencesn: t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); @@ -2233,25 +2227,8 @@ authencesn: dev_warn(ctrldev, "%s alg registration failed\n", t_alg->crypto_alg.cra_driver_name); kfree(t_alg); - } else { + } else list_add_tail(&t_alg->entry, &priv->alg_list); - if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD && - !memcmp(driver_algs[i].name, "authenc", 7) && - !done) { - char *name; - - name = driver_algs[i].name; - memmove(name + 10, name + 7, strlen(name) - 7); - memcpy(name + 7, "esn", 3); - - name = driver_algs[i].driver_name; - memmove(name + 10, name + 7, strlen(name) - 7); - memcpy(name + 7, "esn", 3); - - done = true; - goto authencesn; - } - } } if (!list_empty(&priv->alg_list)) dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index cf15e7813801..762aeff626ac 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -23,7 +23,6 @@ #include <linux/types.h> #include <linux/debugfs.h> #include <linux/circ_buf.h> -#include <linux/string.h> #include <net/xfrm.h> #include <crypto/algapi.h> diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 09b184adf31b..5b2b5e61e4f9 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -38,7 +38,6 @@ #include <linux/spinlock.h> #include <linux/rtnetlink.h> #include <linux/slab.h> -#include <linux/string.h> #include <crypto/algapi.h> #include <crypto/aes.h> @@ -1974,11 +1973,7 @@ struct talitos_alg_template { }; static struct talitos_alg_template driver_algs[] = { - /* - * AEAD algorithms. These use a single-pass ipsec_esp descriptor. - * authencesn(*,*) is also registered, although not present - * explicitly here. - */ + /* AEAD algorithms. 
These use a single-pass ipsec_esp descriptor */ { .type = CRYPTO_ALG_TYPE_AEAD, .alg.crypto = { .cra_name = "authenc(hmac(sha1),cbc(aes))", @@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev) if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { struct talitos_crypto_alg *t_alg; char *name = NULL; - bool authenc = false; -authencesn: t_alg = talitos_alg_alloc(dev, &driver_algs[i]); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); @@ -2837,8 +2830,6 @@ authencesn: err = crypto_register_alg( &t_alg->algt.alg.crypto); name = t_alg->algt.alg.crypto.cra_driver_name; - authenc = authenc ? !authenc : - !(bool)memcmp(name, "authenc", 7); break; case CRYPTO_ALG_TYPE_AHASH: err = crypto_register_ahash( @@ -2851,25 +2842,8 @@ authencesn: dev_err(dev, "%s alg registration failed\n", name); kfree(t_alg); - } else { + } else list_add_tail(&t_alg->entry, &priv->alg_list); - if (authenc) { - struct crypto_alg *alg = - &driver_algs[i].alg.crypto; - - name = alg->cra_name; - memmove(name + 10, name + 7, - strlen(name) - 7); - memcpy(name + 7, "esn", 3); - - name = alg->cra_driver_name; - memmove(name + 10, name + 7, - strlen(name) - 7); - memcpy(name + 7, "esn", 3); - - goto authencesn; - } - } } } if (!list_empty(&priv->alg_list)) diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7a..22c9063e0120 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { .shutdown = ux500_cryp_shutdown, .driver = { .owner = THIS_MODULE, - .name = "cryp1" + .name = "cryp1", .pm = &ux500_cryp_pm, } }; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf28..aeaea32bcfda 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -83,6 +83,7 @@ config INTEL_IOP_ADMA config DW_DMAC tristate "Synopsys DesignWare AHB DMA support" + depends on GENERIC_HARDIRQS select DMA_ENGINE default y if CPU_AT32AP7000 help diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6e13f262139a..88cfc61329d2 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); - BUG_ON(atc_chan_is_enabled(atchan)); - /* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones. 
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan) { dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); + if (atc_chan_is_enabled(atchan)) + return; + if (list_empty(&atchan->active_list) || list_is_singular(&atchan->active_list)) { atc_complete_all(atchan); @@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan) return; spin_lock_irqsave(&atchan->lock, flags); - if (!atc_chan_is_enabled(atchan)) { - atc_advance_work(atchan); - } + atc_advance_work(atchan); spin_unlock_irqrestore(&atchan->lock, flags); } diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index c599558faeda..43a5329d4483 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst) *maxburst = 0; } +static inline void convert_slave_id(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + dwc->dma_sconfig.slave_id -= dw->request_line_base; +} + static int set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { @@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) convert_burst(&dwc->dma_sconfig.src_maxburst); convert_burst(&dwc->dma_sconfig.dst_maxburst); + convert_slave_id(dwc); return 0; } @@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 3) return NULL; - fargs.req = be32_to_cpup(dma_spec->args+0); - fargs.src = be32_to_cpup(dma_spec->args+1); - fargs.dst = be32_to_cpup(dma_spec->args+2); + fargs.req = dma_spec->args[0]; + fargs.src = dma_spec->args[1]; + fargs.dst = dma_spec->args[2]; if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || fargs.src >= dw->nr_masters || @@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev) static int dw_probe(struct platform_device *pdev) { + const struct platform_device_id *match; struct dw_dma_platform_data *pdata; struct resource *io; struct dw_dma *dw; @@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev) memcpy(dw->data_width, pdata->data_width, 4); } + /* Get the base request line if set */ + match = platform_get_device_id(pdev); + if (match) + dw->request_line_base = (unsigned int)match->driver_data; + /* Calculate all channel mask before DMA setup */ dw->all_chan_mask = (1 << nr_channels) - 1; @@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table); #endif static const struct platform_device_id dw_dma_ids[] = { - { "INTL9C60", 0 }, + /* Name, Request Line Base */ + { "INTL9C60", (kernel_ulong_t)16 }, { } }; diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index cf0ce5c77d60..4d02c3669b75 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -247,6 +247,7 @@ struct dw_dma { /* hardware configuration */ unsigned char nr_masters; unsigned char data_width[4]; + unsigned int request_line_base; struct dw_dma_chan chan[0]; }; diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc42..08b43bf37158 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); if (vchan_issue_pending(&c->vc) && !c->desc) { - struct omap_dmadev *d = to_omap_dma_dev(chan->device); - spin_lock(&d->lock); - if (list_empty(&c->node)) - list_add_tail(&c->node, &d->pending); - spin_unlock(&d->lock); - tasklet_schedule(&d->task); + /* + * c->cyclic is used only by audio and in this case the DMA need + * 
to be started without delay. + */ + if (!c->cyclic) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } else { + omap_dma_start_desc(c); + } } spin_unlock_irqrestore(&c->vc.lock, flags); } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 718153122759..5dbc5946c4c3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) { struct dma_pl330_platdata *pdat; struct dma_pl330_dmac *pdmac; - struct dma_pl330_chan *pch; + struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; struct dma_device *pd; struct resource *res; @@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ret = dma_async_device_register(pd); if (ret) { dev_err(&adev->dev, "unable to register DMAC\n"); - goto probe_err2; + goto probe_err3; + } + + if (adev->dev.of_node) { + ret = of_dma_controller_register(adev->dev.of_node, + of_dma_pl330_xlate, pdmac); + if (ret) { + dev_err(&adev->dev, + "unable to register DMA to the generic DT DMA helpers\n"); + } } dev_info(&adev->dev, @@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, pi->pcfg.num_peri, pi->pcfg.num_events); - ret = of_dma_controller_register(adev->dev.of_node, - of_dma_pl330_xlate, pdmac); - if (ret) { - dev_err(&adev->dev, - "unable to register DMA to the generic DT DMA helpers\n"); - goto probe_err2; - } - return 0; +probe_err3: + amba_set_drvdata(adev, NULL); + /* Idle the DMAC */ + list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + chan.device_node) { + + /* Remove the channel */ + list_del(&pch->chan.device_node); + + /* Flush the channel */ + pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_free_chan_resources(&pch->chan); + } probe_err2: pl330_del(pi); probe_err1: @@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev) if (!pdmac) return 0; - of_dma_controller_free(adev->dev.of_node); + if (adev->dev.of_node) + of_dma_controller_free(adev->dev.of_node); + dma_async_device_unregister(&pdmac->ddma); amba_set_drvdata(adev, NULL); /* Idle the DMAC */ diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 910b0116c128..e1d13c463c90 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci) edac_dbg(1, "MC node: %d, csrow: %d\n", pvt->mc_node_id, i); - if (row_dct0) + if (row_dct0) { nr_pages = amd64_csrow_nr_pages(pvt, 0, i); + csrow->channels[0]->dimm->nr_pages = nr_pages; + } /* K8 has only one DCT */ - if (boot_cpu_data.x86 != 0xf && row_dct1) - nr_pages += amd64_csrow_nr_pages(pvt, 1, i); + if (boot_cpu_data.x86 != 0xf && row_dct1) { + int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); + + csrow->channels[1]->dimm->nr_pages = row_dct1_pages; + nr_pages += row_dct1_pages; + } mtype = amd64_determine_memory_type(pvt, i); @@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci) dimm = csrow->channels[j]->dimm; dimm->mtype = mtype; dimm->edac_mode = edac_mode; - dimm->nr_pages = nr_pages; } - csrow->nr_pages = nr_pages; } return empty; @@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2) mci->pvt_info = pvt; mci->pdev = &pvt->F2->dev; - mci->csbased = 1; setup_mci_misc_attrs(mci, fam_type); diff --git a/drivers/edac/edac_mc.c 
b/drivers/edac/edac_mc.c index cdb81aa73ab7..27e86d938262 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) edac_dimm_info_location(dimm, location, sizeof(location)); edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", - dimm->mci->mem_is_per_rank ? "rank" : "dimm", + dimm->mci->csbased ? "rank" : "dimm", number, location, dimm->csrow, dimm->cschannel); edac_dbg(4, " dimm = %p\n", dimm); edac_dbg(4, " dimm->label = '%s'\n", dimm->label); @@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, memcpy(mci->layers, layers, sizeof(*layer) * n_layers); mci->nr_csrows = tot_csrows; mci->num_cschannel = tot_channels; - mci->mem_is_per_rank = per_rank; + mci->csbased = per_rank; /* * Alocate and fill the csrow/channels structs @@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, * incrementing the compat API counters */ edac_dbg(4, "%s csrows map: (%d,%d)\n", - mci->mem_is_per_rank ? "rank" : "dimm", + mci->csbased ? "rank" : "dimm", dimm->csrow, dimm->cschannel); if (row == -1) row = dimm->csrow; diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 4f4b6137d74e..5899a76eec3b 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -143,7 +143,7 @@ static const char *edac_caps[] = { * and the per-dimm/per-rank one */ #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ - struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) + static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) struct dev_ch_attribute { struct device_attribute attr; @@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev, int i; u32 nr_pages = 0; - if (csrow->mci->csbased) - return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); - for (i = 0; i < csrow->nr_channels; i++) nr_pages += csrow->channels[i]->dimm->nr_pages; return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); @@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci, device_initialize(&dimm->dev); dimm->dev.parent = &mci->dev; - if (mci->mem_is_per_rank) + if (mci->csbased) dev_set_name(&dimm->dev, "rank%d", index); else dev_set_name(&dimm->dev, "dimm%d", index); @@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev, for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { struct csrow_info *csrow = mci->csrows[csrow_idx]; - if (csrow->mci->csbased) { - total_pages += csrow->nr_pages; - } else { - for (j = 0; j < csrow->nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j]->dimm; + for (j = 0; j < csrow->nr_channels; j++) { + struct dimm_info *dimm = csrow->channels[j]->dimm; - total_pages += dimm->nr_pages; - } + total_pages += dimm->nr_pages; } } diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index 0d42ea791060..a333bf3517de 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c @@ -19,19 +19,40 @@ /* There is only *one* pci_eisa device per machine, right ? 
*/ static struct eisa_root_device pci_eisa_root; -static int __init pci_eisa_init(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __init pci_eisa_init(struct pci_dev *pdev) { - int rc; + int rc, i; + struct resource *res, *bus_res = NULL; if ((rc = pci_enable_device (pdev))) { dev_err(&pdev->dev, "Could not enable device\n"); return rc; } + /* + * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI + * device, so the resources available on EISA are the same as those + * available on the 82375 bus. This works the same as a PCI-PCI + * bridge in subtractive-decode mode (see pci_read_bridge_bases()). + * We assume other PCI-EISA bridges are similar. + * + * eisa_root_register() can only deal with a single io port resource, + * so we use the first valid io port resource. + */ + pci_bus_for_each_resource(pdev->bus, res, i) + if (res && (res->flags & IORESOURCE_IO)) { + bus_res = res; + break; + } + + if (!bus_res) { + dev_err(&pdev->dev, "No resources available\n"); + return -1; + } + pci_eisa_root.dev = &pdev->dev; - pci_eisa_root.res = pdev->bus->resource[0]; - pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; + pci_eisa_root.res = bus_res; + pci_eisa_root.bus_base_addr = bus_res->start; pci_eisa_root.slots = EISA_MAX_SLOTS; pci_eisa_root.dma_mask = pdev->dma_mask; dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); @@ -44,22 +65,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev, return 0; } -static struct pci_device_id pci_eisa_pci_tbl[] = { - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, - { 0, } -}; +/* + * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). + * Otherwise pnp resource will get enabled early and could prevent eisa + * to be initialized. + * Also need to make sure pci_eisa_init_early() is called after + * x86/pci_subsys_init(). + * So need to use subsys_initcall_sync with it. + */ +static int __init pci_eisa_init_early(void) +{ + struct pci_dev *dev = NULL; + int ret; -static struct pci_driver __refdata pci_eisa_driver = { - .name = "pci_eisa", - .id_table = pci_eisa_pci_tbl, - .probe = pci_eisa_init, -}; + for_each_pci_dev(dev) + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { + ret = pci_eisa_init(dev); + if (ret) + return ret; + } -static int __init pci_eisa_init_module (void) -{ - return pci_register_driver (&pci_eisa_driver); + return 0; } - -device_initcall(pci_eisa_init_module); -MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); +subsys_initcall_sync(pci_eisa_init_early); diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index b70e3815c459..8f3c947b0029 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c @@ -32,6 +32,38 @@ #define DEV_NAME "max77693-muic" #define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ +/* + * Default value of MAX77693 register to bring up MUIC device. + * If user don't set some initial value for MUIC device through platform data, + * extcon-max77693 driver use 'default_init_data' to bring up base operation + * of MAX77693 MUIC device. 
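The pci_eisa_init() rework shown above stops assuming bus->resource[0] is the EISA I/O window and instead walks the bridge's bus resources for the first I/O-port entry. Below is a minimal, standalone C model of that selection step; the struct layout, flag values and sample table are invented for illustration and are not the kernel's definitions.

/*
 * Standalone model (not kernel code): pick the first I/O-port window from a
 * bridge's bus resources, as the reworked pci_eisa_init() does with
 * pci_bus_for_each_resource(). Types and values are local to this example.
 */
#include <stdio.h>
#include <stddef.h>

#define IORESOURCE_IO  0x00000100	/* illustrative flag values */
#define IORESOURCE_MEM 0x00000200

struct res_model {
	unsigned long start, end, flags;
};

static const struct res_model *first_io_window(const struct res_model *res,
					       size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (res[i].flags & IORESOURCE_IO)
			return &res[i];
	return NULL;	/* caller treats this as "No resources available" */
}

int main(void)
{
	const struct res_model bus_res[] = {
		{ 0xfed00000, 0xfedfffff, IORESOURCE_MEM },
		{ 0x1000,     0xffff,     IORESOURCE_IO  },
	};
	const struct res_model *io = first_io_window(bus_res, 2);

	if (io)
		printf("EISA root base: 0x%lx\n", io->start);
	return 0;
}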
+ */ +struct max77693_reg_data default_init_data[] = { + { + /* STATUS2 - [3]ChgDetRun */ + .addr = MAX77693_MUIC_REG_STATUS2, + .data = STATUS2_CHGDETRUN_MASK, + }, { + /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */ + .addr = MAX77693_MUIC_REG_INTMASK1, + .data = INTMASK1_ADC1K_MASK + | INTMASK1_ADC_MASK, + }, { + /* INTMASK2 - Unmask [0]ChgTypM */ + .addr = MAX77693_MUIC_REG_INTMASK2, + .data = INTMASK2_CHGTYP_MASK, + }, { + /* INTMASK3 - Mask all of interrupts */ + .addr = MAX77693_MUIC_REG_INTMASK3, + .data = 0x0, + }, { + /* CDETCTRL2 */ + .addr = MAX77693_MUIC_REG_CDETCTRL2, + .data = CDETCTRL2_VIDRMEN_MASK + | CDETCTRL2_DXOVPEN_MASK, + }, +}; + enum max77693_muic_adc_debounce_time { ADC_DEBOUNCE_TIME_5MS = 0, ADC_DEBOUNCE_TIME_10MS, @@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev) { struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); - struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; struct max77693_muic_info *info; + struct max77693_reg_data *init_data; + int num_init_data; int delay_jiffies; int ret; int i; @@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev) goto err_irq; } - /* Initialize MUIC register by using platform data */ - for (i = 0 ; i < muic_pdata->num_init_data ; i++) { - enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR; + + /* Initialize MUIC register by using platform data or default data */ + if (pdata->muic_data) { + init_data = pdata->muic_data->init_data; + num_init_data = pdata->muic_data->num_init_data; + } else { + init_data = default_init_data; + num_init_data = ARRAY_SIZE(default_init_data); + } + + for (i = 0 ; i < num_init_data ; i++) { + enum max77693_irq_source irq_src + = MAX77693_IRQ_GROUP_NR; max77693_write_reg(info->max77693->regmap_muic, - muic_pdata->init_data[i].addr, - muic_pdata->init_data[i].data); + init_data[i].addr, + init_data[i].data); - switch (muic_pdata->init_data[i].addr) { + switch (init_data[i].addr) { case MAX77693_MUIC_REG_INTMASK1: irq_src = MUIC_INT1; break; @@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev) if (irq_src < MAX77693_IRQ_GROUP_NR) info->max77693->irq_masks_cur[irq_src] - = muic_pdata->init_data[i].data; + = init_data[i].data; } - /* - * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB - * h/w path of COMP2/COMN1 on CONTROL1 register. - */ - if (muic_pdata->path_uart) - info->path_uart = muic_pdata->path_uart; - else - info->path_uart = CONTROL1_SW_UART; + if (pdata->muic_data) { + struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; - if (muic_pdata->path_usb) - info->path_usb = muic_pdata->path_usb; - else + /* + * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB + * h/w path of COMP2/COMN1 on CONTROL1 register. + */ + if (muic_pdata->path_uart) + info->path_uart = muic_pdata->path_uart; + else + info->path_uart = CONTROL1_SW_UART; + + if (muic_pdata->path_usb) + info->path_usb = muic_pdata->path_usb; + else + info->path_usb = CONTROL1_SW_USB; + + /* + * Default delay time for detecting cable state + * after certain time. 
+ */ + if (muic_pdata->detcable_delay_ms) + delay_jiffies = + msecs_to_jiffies(muic_pdata->detcable_delay_ms); + else + delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); + } else { info->path_usb = CONTROL1_SW_USB; + info->path_uart = CONTROL1_SW_UART; + delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); + } /* Set initial path for UART */ max77693_muic_set_path(info, info->path_uart, true); @@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev) * driver should notify cable state to upper layer. */ INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); - if (muic_pdata->detcable_delay_ms) - delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms); - else - delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); schedule_delayed_work(&info->wq_detcable, delay_jiffies); return ret; diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index e636d950ad6c..69641bcae325 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c @@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev) goto err_irq; } - /* Initialize registers according to platform data */ if (pdata->muic_pdata) { - struct max8997_muic_platform_data *mdata = info->muic_pdata; - - for (i = 0; i < mdata->num_init_data; i++) { - max8997_write_reg(info->muic, mdata->init_data[i].addr, - mdata->init_data[i].data); + struct max8997_muic_platform_data *muic_pdata + = pdata->muic_pdata; + + /* Initialize registers according to platform data */ + for (i = 0; i < muic_pdata->num_init_data; i++) { + max8997_write_reg(info->muic, + muic_pdata->init_data[i].addr, + muic_pdata->init_data[i].data); } - } - /* - * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB - * h/w path of COMP2/COMN1 on CONTROL1 register. - */ - if (pdata->muic_pdata->path_uart) - info->path_uart = pdata->muic_pdata->path_uart; - else - info->path_uart = CONTROL1_SW_UART; + /* + * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB + * h/w path of COMP2/COMN1 on CONTROL1 register. + */ + if (muic_pdata->path_uart) + info->path_uart = muic_pdata->path_uart; + else + info->path_uart = CONTROL1_SW_UART; - if (pdata->muic_pdata->path_usb) - info->path_usb = pdata->muic_pdata->path_usb; - else + if (muic_pdata->path_usb) + info->path_usb = muic_pdata->path_usb; + else + info->path_usb = CONTROL1_SW_USB; + + /* + * Default delay time for detecting cable state + * after certain time. + */ + if (muic_pdata->detcable_delay_ms) + delay_jiffies = + msecs_to_jiffies(muic_pdata->detcable_delay_ms); + else + delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); + } else { + info->path_uart = CONTROL1_SW_UART; info->path_usb = CONTROL1_SW_USB; + delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); + } /* Set initial path for UART */ max8997_muic_set_path(info, info->path_uart, true); @@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev) * driver should notify cable state to upper layer. 
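Both extcon probe paths above now fall back to a built-in default when platform data does not supply detcable_delay_ms. A standalone sketch of that fallback follows; HZ and the conversion helper are simplified stand-ins, not the kernel implementations.

/*
 * Model of the "platform data or default" delay selection used in the
 * max77693/max8997 probe hunks above.
 */
#include <stdio.h>

#define HZ			100	/* illustrative tick rate */
#define DELAY_MS_DEFAULT	20000	/* unit: millisecond */

static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
	return (unsigned long)ms * HZ / 1000;
}

static unsigned long pick_detcable_delay(const unsigned int *pdata_delay_ms)
{
	/* Use the platform-provided delay when present and non-zero. */
	if (pdata_delay_ms && *pdata_delay_ms)
		return msecs_to_jiffies_model(*pdata_delay_ms);
	return msecs_to_jiffies_model(DELAY_MS_DEFAULT);
}

int main(void)
{
	unsigned int board_delay = 500;

	printf("with pdata:    %lu jiffies\n", pick_detcable_delay(&board_delay));
	printf("without pdata: %lu jiffies\n", pick_detcable_delay(NULL));
	return 0;
}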
*/ INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); - if (pdata->muic_pdata->detcable_delay_ms) - delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms); - else - delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); schedule_delayed_work(&info->wq_detcable, delay_jiffies); return 0; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 9b00072a020f..3e532002e4d1 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP config EFI_VARS tristate "EFI Variable Support via sysfs" depends on EFI + select UCS2_STRING default n help If you say Y here, you are able to get EFI (Extensible Firmware @@ -53,6 +54,24 @@ config EFI_VARS Subsequent efibootmgr releases may be found at: <http://linux.dell.com/efibootmgr> +config EFI_VARS_PSTORE + bool "Register efivars backend for pstore" + depends on EFI_VARS && PSTORE + default y + help + Say Y here to enable use efivars as a backend to pstore. This + will allow writing console messages, crash dumps, or anything + else supported by pstore to EFI variables. + +config EFI_VARS_PSTORE_DEFAULT_DISABLE + bool "Disable using efivars as a pstore backend by default" + depends on EFI_VARS_PSTORE + default n + help + Saying Y here will disable the use of efivars as a storage + backend for pstore by default. This setting can be overridden + using the efivars module's pstore_disable parameter. + config EFI_PCDP bool "Console device selection via EFI PCDP or HCDP table" depends on ACPI && EFI && IA64 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index fe62aa392239..f4baa11d3644 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -80,6 +80,7 @@ #include <linux/slab.h> #include <linux/pstore.h> #include <linux/ctype.h> +#include <linux/ucs2_string.h> #include <linux/fs.h> #include <linux/ramfs.h> @@ -103,6 +104,11 @@ MODULE_VERSION(EFIVARS_VERSION); */ #define GUID_LEN 36 +static bool efivars_pstore_disable = + IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE); + +module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); + /* * The maximum size of VariableName + Data = 1024 * Therefore, it's reasonable to save that much @@ -165,51 +171,7 @@ efivar_create_sysfs_entry(struct efivars *efivars, static void efivar_update_sysfs_entries(struct work_struct *); static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); - -/* Return the number of unicode characters in data */ -static unsigned long -utf16_strnlen(efi_char16_t *s, size_t maxlength) -{ - unsigned long length = 0; - - while (*s++ != 0 && length < maxlength) - length++; - return length; -} - -static inline unsigned long -utf16_strlen(efi_char16_t *s) -{ - return utf16_strnlen(s, ~0UL); -} - -/* - * Return the number of bytes is the length of this string - * Note: this is NOT the same as the number of unicode characters - */ -static inline unsigned long -utf16_strsize(efi_char16_t *data, unsigned long maxlength) -{ - return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); -} - -static inline int -utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) -{ - while (1) { - if (len == 0) - return 0; - if (*a < *b) - return -1; - if (*a > *b) - return 1; - if (*a == 0) /* implies *b == 0 */ - return 0; - a++; - b++; - len--; - } -} +static bool efivar_wq_enabled = true; static bool validate_device_path(struct efi_variable *var, int match, u8 *buffer, @@ -262,7 +224,7 @@ validate_load_option(struct efi_variable 
*var, int match, u8 *buffer, u16 filepathlength; int i, desclength = 0, namelen; - namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); + namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); /* Either "Boot" or "Driver" followed by four digits of hex */ for (i = match; i < match+4; i++) { @@ -285,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, * There's no stored length for the description, so it has to be * found by hand */ - desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; + desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; /* Each boot entry must have a descriptor */ if (!desclength) @@ -430,24 +392,12 @@ static efi_status_t check_var_size_locked(struct efivars *efivars, u32 attributes, unsigned long size) { - u64 storage_size, remaining_size, max_size; - efi_status_t status; const struct efivar_operations *fops = efivars->ops; - if (!efivars->ops->query_variable_info) + if (!efivars->ops->query_variable_store) return EFI_UNSUPPORTED; - status = fops->query_variable_info(attributes, &storage_size, - &remaining_size, &max_size); - - if (status != EFI_SUCCESS) - return status; - - if (!storage_size || size > remaining_size || size > max_size || - (remaining_size - size) < (storage_size / 2)) - return EFI_OUT_OF_RESOURCES; - - return status; + return fops->query_variable_store(attributes, size); } @@ -587,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) spin_lock_irq(&efivars->lock); status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); + new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024)); if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) status = efivars->ops->set_variable(new_var->VariableName, @@ -765,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file, * QueryVariableInfo() isn't supported by the firmware. 
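The utf16_* to ucs2_* conversion above preserves the same semantics: the length helper counts UCS-2 characters, while the size helper reports bytes (characters times the character width, terminator excluded). A self-contained model of those two operations, using a local 16-bit typedef rather than the kernel's efi_char16_t:

/*
 * Standalone model of ucs2_strnlen()/ucs2_strsize() semantics: length is in
 * UCS-2 characters, size is in bytes.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint16_t ucs2_char_t;	/* local stand-in for efi_char16_t */

static size_t ucs2_strnlen_model(const ucs2_char_t *s, size_t maxlength)
{
	size_t length = 0;

	while (length < maxlength && *s++ != 0)
		length++;
	return length;
}

static size_t ucs2_strsize_model(const ucs2_char_t *s, size_t maxbytes)
{
	return ucs2_strnlen_model(s, maxbytes / sizeof(*s)) * sizeof(*s);
}

int main(void)
{
	const ucs2_char_t name[] = { 'B', 'o', 'o', 't', '0', '0', '0', '0', 0 };

	printf("chars: %zu, bytes: %zu\n",
	       ucs2_strnlen_model(name, 1024), ucs2_strsize_model(name, 1024));
	/* prints "chars: 8, bytes: 16" -- the NUL terminator is not counted */
	return 0;
}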
*/ - varsize = datasize + utf16_strsize(var->var.VariableName, 1024); + varsize = datasize + ucs2_strsize(var->var.VariableName, 1024); status = check_var_size(efivars, attributes, varsize); if (status != EFI_SUCCESS) { @@ -1217,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) inode = NULL; - len = utf16_strlen(entry->var.VariableName); + len = ucs2_strlen(entry->var.VariableName); /* name, plus '-', plus GUID, plus NUL*/ name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC); @@ -1309,9 +1259,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = { .create = efivarfs_create, }; -static struct pstore_info efi_pstore_info; - -#ifdef CONFIG_PSTORE +#ifdef CONFIG_EFI_VARS_PSTORE static int efi_pstore_open(struct pstore_info *psi) { @@ -1441,7 +1389,7 @@ static int efi_pstore_write(enum pstore_type_id type, spin_unlock_irqrestore(&efivars->lock, flags); - if (reason == KMSG_DUMP_OOPS) + if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled) schedule_work(&efivar_work); *id = part; @@ -1477,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, if (efi_guidcmp(entry->var.VendorGuid, vendor)) continue; - if (utf16_strncmp(entry->var.VariableName, efi_name, - utf16_strlen(efi_name))) { + if (ucs2_strncmp(entry->var.VariableName, efi_name, + ucs2_strlen(efi_name))) { /* * Check if an old format, * which doesn't support holding @@ -1490,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, for (i = 0; i < DUMP_NAME_LEN; i++) efi_name_old[i] = name_old[i]; - if (utf16_strncmp(entry->var.VariableName, efi_name_old, - utf16_strlen(efi_name_old))) + if (ucs2_strncmp(entry->var.VariableName, efi_name_old, + ucs2_strlen(efi_name_old))) continue; } @@ -1514,38 +1462,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, return 0; } -#else -static int efi_pstore_open(struct pstore_info *psi) -{ - return 0; -} - -static int efi_pstore_close(struct pstore_info *psi) -{ - return 0; -} - -static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count, - struct timespec *timespec, - char **buf, struct pstore_info *psi) -{ - return -1; -} - -static int efi_pstore_write(enum pstore_type_id type, - enum kmsg_dump_reason reason, u64 *id, - unsigned int part, int count, size_t size, - struct pstore_info *psi) -{ - return 0; -} - -static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, - struct timespec time, struct pstore_info *psi) -{ - return 0; -} -#endif static struct pstore_info efi_pstore_info = { .owner = THIS_MODULE, @@ -1557,6 +1473,24 @@ static struct pstore_info efi_pstore_info = { .erase = efi_pstore_erase, }; +static void efivar_pstore_register(struct efivars *efivars) +{ + efivars->efi_pstore_info = efi_pstore_info; + efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); + if (efivars->efi_pstore_info.buf) { + efivars->efi_pstore_info.bufsize = 1024; + efivars->efi_pstore_info.data = efivars; + spin_lock_init(&efivars->efi_pstore_info.buf_lock); + pstore_register(&efivars->efi_pstore_info); + } +} +#else +static void efivar_pstore_register(struct efivars *efivars) +{ + return; +} +#endif + static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) @@ -1583,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, * Does this variable already exist? 
*/ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf16_strsize(new_var->VariableName, 1024); + strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); + strsize2 = ucs2_strsize(new_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), new_var->VariableName, strsize1) && @@ -1600,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, } status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); + new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024)); if (status && status != EFI_UNSUPPORTED) { spin_unlock_irq(&efivars->lock); @@ -1624,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, /* Create the entry in sysfs. Locking is not required here */ status = efivar_create_sysfs_entry(efivars, - utf16_strsize(new_var->VariableName, + ucs2_strsize(new_var->VariableName, 1024), new_var->VariableName, &new_var->VendorGuid); @@ -1654,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * Does this variable already exist? */ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf16_strsize(del_var->VariableName, 1024); + strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024); + strsize2 = ucs2_strsize(del_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), del_var->VariableName, strsize1) && @@ -1694,16 +1628,17 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, return count; } -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) +static bool variable_is_present(struct efivars *efivars, + efi_char16_t *variable_name, + efi_guid_t *vendor) { struct efivar_entry *entry, *n; - struct efivars *efivars = &__efivars; unsigned long strsize1, strsize2; bool found = false; - strsize1 = utf16_strsize(variable_name, 1024); + strsize1 = ucs2_strsize(variable_name, 1024); list_for_each_entry_safe(entry, n, &efivars->list, list) { - strsize2 = utf16_strsize(entry->var.VariableName, 1024); + strsize2 = ucs2_strsize(entry->var.VariableName, 1024); if (strsize1 == strsize2 && !memcmp(variable_name, &(entry->var.VariableName), strsize2) && @@ -1716,6 +1651,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) return found; } +/* + * Returns the size of variable_name, in bytes, including the + * terminating NULL character, or variable_name_size if no NULL + * character is found among the first variable_name_size bytes. + */ +static unsigned long var_name_strnsize(efi_char16_t *variable_name, + unsigned long variable_name_size) +{ + unsigned long len; + efi_char16_t c; + + /* + * The variable name is, by definition, a NULL-terminated + * string, so make absolutely sure that variable_name_size is + * the value we expect it to be. If not, return the real size. 
+ */ + for (len = 2; len <= variable_name_size; len += sizeof(c)) { + c = variable_name[(len / sizeof(c)) - 1]; + if (!c) + break; + } + + return min(len, variable_name_size); +} + static void efivar_update_sysfs_entries(struct work_struct *work) { struct efivars *efivars = &__efivars; @@ -1744,8 +1704,8 @@ static void efivar_update_sysfs_entries(struct work_struct *work) if (status != EFI_SUCCESS) { break; } else { - if (!variable_is_present(variable_name, - &vendor)) { + if (!variable_is_present(efivars, + variable_name, &vendor)) { found = true; break; } @@ -1756,10 +1716,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work) if (!found) { kfree(variable_name); break; - } else + } else { + variable_name_size = var_name_strnsize(variable_name, + variable_name_size); efivar_create_sysfs_entry(efivars, variable_name_size, variable_name, &vendor); + } } } @@ -1958,6 +1921,35 @@ void unregister_efivars(struct efivars *efivars) } EXPORT_SYMBOL_GPL(unregister_efivars); +/* + * Print a warning when duplicate EFI variables are encountered and + * disable the sysfs workqueue since the firmware is buggy. + */ +static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, + unsigned long len16) +{ + size_t i, len8 = len16 / sizeof(efi_char16_t); + char *s8; + + /* + * Disable the workqueue since the algorithm it uses for + * detecting new variables won't work with this buggy + * implementation of GetNextVariableName(). + */ + efivar_wq_enabled = false; + + s8 = kzalloc(len8, GFP_KERNEL); + if (!s8) + return; + + for (i = 0; i < len8; i++) + s8[i] = s16[i]; + + printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", + s8, vendor_guid); + kfree(s8); +} + int register_efivars(struct efivars *efivars, const struct efivar_operations *ops, struct kobject *parent_kobj) @@ -2006,6 +1998,25 @@ int register_efivars(struct efivars *efivars, &vendor_guid); switch (status) { case EFI_SUCCESS: + variable_name_size = var_name_strnsize(variable_name, + variable_name_size); + + /* + * Some firmware implementations return the + * same variable name on multiple calls to + * get_next_variable(). Terminate the loop + * immediately as there is no guarantee that + * we'll ever see a different variable name, + * and may end up looping here forever. 
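The comment above explains the firmware bug this series guards against: some GetNextVariableName() implementations keep returning the same name, so enumeration must stop as soon as a name repeats (the guard itself follows below). A compilable model of that termination pattern, with a made-up "firmware" table containing a deliberate repeat:

/*
 * Model of the duplicate-variable guard: stop enumerating once a name we
 * have already registered comes back again.
 */
#include <stdio.h>
#include <string.h>

static const char *buggy_firmware[] = {
	"Boot0000", "Boot0001", "BootOrder", "BootOrder", "BootOrder",
};

int main(void)
{
	const char *seen[16];
	size_t nseen = 0;

	for (size_t i = 0; i < sizeof(buggy_firmware) / sizeof(buggy_firmware[0]); i++) {
		const char *name = buggy_firmware[i];
		int dup = 0;

		for (size_t j = 0; j < nseen; j++)
			if (!strcmp(seen[j], name)) {
				dup = 1;
				break;
			}

		if (dup) {
			printf("duplicate variable: %s -- stopping enumeration\n", name);
			break;
		}
		seen[nseen++] = name;
		printf("registered %s\n", name);
	}
	return 0;
}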
+ */ + if (variable_is_present(efivars, variable_name, + &vendor_guid)) { + dup_variable_bug(variable_name, &vendor_guid, + variable_name_size); + status = EFI_NOT_FOUND; + break; + } + efivar_create_sysfs_entry(efivars, variable_name_size, variable_name, @@ -2025,15 +2036,8 @@ int register_efivars(struct efivars *efivars, if (error) unregister_efivars(efivars); - efivars->efi_pstore_info = efi_pstore_info; - - efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); - if (efivars->efi_pstore_info.buf) { - efivars->efi_pstore_info.bufsize = 1024; - efivars->efi_pstore_info.data = efivars; - spin_lock_init(&efivars->efi_pstore_info.buf_lock); - pstore_register(&efivars->efi_pstore_info); - } + if (!efivars_pstore_disable) + efivar_pstore_register(efivars); register_filesystem(&efivarfs_type); @@ -2073,7 +2077,7 @@ efivars_init(void) ops.get_variable = efi.get_variable; ops.set_variable = efi.set_variable; ops.get_next_variable = efi.get_next_variable; - ops.query_variable_info = efi.query_variable_info; + ops.query_variable_store = efi_query_variable_store; error = register_efivars(&__efivars, &ops, efi_kobj); if (error) diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc40..de3c317bd3e2 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr) * If it can't be trusted, assume that the pin can be used as a GPIO. */ if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) - return 1; + return 0; return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; } diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 7472182967ce..61a6fde6c089 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -42,6 +42,7 @@ #include <linux/io.h> #include <linux/of_irq.h> #include <linux/of_device.h> +#include <linux/clk.h> #include <linux/pinctrl/consumer.h> /* @@ -496,6 +497,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) struct resource *res; struct irq_chip_generic *gc; struct irq_chip_type *ct; + struct clk *clk; unsigned int ngpios; int soc_variant; int i, cpu, id; @@ -529,6 +531,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev) return id; } + clk = devm_clk_get(&pdev->dev, NULL); + /* Not all SoCs require a clock.*/ + if (!IS_ERR(clk)) + clk_prepare_enable(clk); + mvchip->soc_variant = soc_variant; mvchip->chip.label = dev_name(&pdev->dev); mvchip->chip.dev = &pdev->dev; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87f..9391cf16e990 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, chip->gpio_chip.ngpio, irq_base, &pca953x_irq_simple_ops, - NULL); + chip); if (!chip->domain) return -ENODEV; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9cc108d2b770..8325f580c0f1 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = { .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; -module_platform_driver(pxa_gpio_driver); + +static int __init pxa_gpio_init(void) +{ + return platform_driver_register(&pxa_gpio_driver); +} +postcore_initcall(pxa_gpio_init); #ifdef CONFIG_PM static int pxa_gpio_suspend(void) diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da87..3ce5bc38ac31 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -307,11 
+307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { .xlate = irq_domain_xlate_twocell, }; -static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) +static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, + struct device_node *np) { - int base = stmpe_gpio->irq_base; + int base = 0; - stmpe_gpio->domain = irq_domain_add_simple(NULL, + if (!np) + base = stmpe_gpio->irq_base; + + stmpe_gpio->domain = irq_domain_add_simple(np, stmpe_gpio->chip.ngpio, base, &stmpe_gpio_irq_simple_ops, stmpe_gpio); if (!stmpe_gpio->domain) { @@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev) stmpe_gpio->chip = template_chip; stmpe_gpio->chip.ngpio = stmpe->num_gpios; stmpe_gpio->chip.dev = &pdev->dev; +#ifdef CONFIG_OF + stmpe_gpio->chip.of_node = np; +#endif stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; if (pdata) @@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) goto out_free; if (irq >= 0) { - ret = stmpe_gpio_irq_init(stmpe_gpio); + ret = stmpe_gpio_irq_init(stmpe_gpio, np); if (ret) goto out_disable; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a71a54a3e3f7..5150df6cba08 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) if (!np) return; - do { + for (;; index++) { ret = of_parse_phandle_with_args(np, "gpio-ranges", "#gpio-range-cells", index, &pinspec); if (ret) @@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) if (ret) break; - - } while (index++); + } } #else diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795ca..dd64a06dc5b4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev, fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - drm_modeset_unlock_all(dev); return PTR_ERR(fb); } @@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev, fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - drm_modeset_unlock_all(dev); return PTR_ERR(fb); } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c194f4e680ad..e2acfdbf7d3c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ @@ -1715,6 +1715,7 @@ set_size: } mode->type = DRM_MODE_TYPE_DRIVER; + mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); return mode; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 59d6b9bf204b..892ff9f95975 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ 
b/drivers/gpu/drm/drm_fb_helper.c @@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!fb_helper->fb) return 0; - drm_modeset_lock_all(dev); + mutex_lock(&fb_helper->dev->mode_config.mutex); if (!drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; - drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->dev->mode_config.mutex); return 0; } DRM_DEBUG_KMS("\n"); @@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + drm_modeset_lock_all(dev); drm_setup_crtcs(fb_helper); drm_modeset_unlock_all(dev); - drm_fb_helper_set_par(fb_helper->fbdev); return 0; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 13fdcd10a605..429e07d0b0f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp) int retcode = 0; int need_setup = 0; struct address_space *old_mapping; + struct address_space *old_imapping; minor = idr_find(&drm_minors_idr, minor_id); if (!minor) @@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp) if (!dev->open_count++) need_setup = 1; mutex_lock(&dev->struct_mutex); + old_imapping = inode->i_mapping; old_mapping = dev->dev_mapping; if (old_mapping == NULL) dev->dev_mapping = &inode->i_data; @@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp) err_undo: mutex_lock(&dev->struct_mutex); - filp->f_mapping = old_mapping; - inode->i_mapping = old_mapping; + filp->f_mapping = old_imapping; + inode->i_mapping = old_imapping; iput(container_of(dev->dev_mapping, struct inode, i_data)); dev->dev_mapping = old_mapping; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 36493ce71f9a..98cc14725ba9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -38,11 +38,12 @@ /* position control register for hardware window 0, 2 ~ 4.*/ #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) -/* size control register for hardware window 0. */ -#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08) -/* alpha control register for hardware window 1 ~ 4. */ -#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16) -/* size control register for hardware window 1 ~ 4. */ +/* + * size control register for hardware windows 0 and alpha control register + * for hardware windows 1 ~ 4 + */ +#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16) +/* size control register for hardware windows 1 ~ 2. */ #define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) #define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) @@ -50,9 +51,9 @@ #define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) /* color key control register for hardware window 1 ~ 4. */ -#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8)) +#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8)) /* color key value register for hardware window 1 ~ 4. */ -#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8)) +#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8)) /* FIMD has totally five hardware windows. 
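The WKEYCON0_BASE()/WKEYCON1_BASE() fix just above matters because the color-key registers only exist for hardware windows 1..4, so window 1 must land on the first 8-byte slot. A small illustration of the offset arithmetic; WKEYCON0 below is a placeholder, not the real Exynos register offset.

/*
 * Illustration of the (x * 8) -> ((x - 1) * 8) macro fix for windows 1..4.
 */
#include <stdio.h>

#define WKEYCON0		0x0	/* placeholder base for illustration */
#define OLD_WKEYCON0_BASE(x)	((WKEYCON0 + 0x140) + ((x) * 8))
#define NEW_WKEYCON0_BASE(x)	((WKEYCON0 + 0x140) + (((x) - 1) * 8))

int main(void)
{
	for (int win = 1; win <= 4; win++)
		printf("win %d: old 0x%x, new 0x%x\n",
		       win, OLD_WKEYCON0_BASE(win), NEW_WKEYCON0_BASE(win));
	/* the new mapping starts at +0x140 for window 1, as intended */
	return 0;
}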
*/ #define WINDOWS_NR 5 @@ -109,9 +110,9 @@ struct fimd_context { #ifdef CONFIG_OF static const struct of_device_id fimd_driver_dt_match[] = { - { .compatible = "samsung,exynos4-fimd", + { .compatible = "samsung,exynos4210-fimd", .data = &exynos4_fimd_driver_data }, - { .compatible = "samsung,exynos5-fimd", + { .compatible = "samsung,exynos5250-fimd", .data = &exynos5_fimd_driver_data }, {}, }; @@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos) if (win != 3 && win != 4) { u32 offset = VIDOSD_D(win); if (win == 0) - offset = VIDOSD_C_SIZE_W0; + offset = VIDOSD_C(win); val = win_data->ovl_width * win_data->ovl_height; writel(val, ctx->regs + offset); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3b0da0378acf..47a493c8a71f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -48,8 +48,14 @@ /* registers for base address */ #define G2D_SRC_BASE_ADDR 0x0304 +#define G2D_SRC_COLOR_MODE 0x030C +#define G2D_SRC_LEFT_TOP 0x0310 +#define G2D_SRC_RIGHT_BOTTOM 0x0314 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 #define G2D_DST_BASE_ADDR 0x0404 +#define G2D_DST_COLOR_MODE 0x040C +#define G2D_DST_LEFT_TOP 0x0410 +#define G2D_DST_RIGHT_BOTTOM 0x0414 #define G2D_DST_PLANE2_BASE_ADDR 0x0418 #define G2D_PAT_BASE_ADDR 0x0500 #define G2D_MSK_BASE_ADDR 0x0520 @@ -82,7 +88,7 @@ #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 /* G2D_DMA_HOLD_CMD */ -#define G2D_USET_HOLD (1 << 2) +#define G2D_USER_HOLD (1 << 2) #define G2D_LIST_HOLD (1 << 1) #define G2D_BITBLT_HOLD (1 << 0) @@ -91,13 +97,27 @@ #define G2D_START_NHOLT (1 << 1) #define G2D_START_BITBLT (1 << 0) +/* buffer color format */ +#define G2D_FMT_XRGB8888 0 +#define G2D_FMT_ARGB8888 1 +#define G2D_FMT_RGB565 2 +#define G2D_FMT_XRGB1555 3 +#define G2D_FMT_ARGB1555 4 +#define G2D_FMT_XRGB4444 5 +#define G2D_FMT_ARGB4444 6 +#define G2D_FMT_PACKED_RGB888 7 +#define G2D_FMT_A8 11 +#define G2D_FMT_L8 12 + +/* buffer valid length */ +#define G2D_LEN_MIN 1 +#define G2D_LEN_MAX 8000 + #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) #define G2D_CMDLIST_NUM 64 #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) -#define MAX_BUF_ADDR_NR 6 - /* maximum buffer pool size of userptr is 64MB as default */ #define MAX_POOL (64 * 1024 * 1024) @@ -106,6 +126,17 @@ enum { BUF_TYPE_USERPTR, }; +enum g2d_reg_type { + REG_TYPE_NONE = -1, + REG_TYPE_SRC, + REG_TYPE_SRC_PLANE2, + REG_TYPE_DST, + REG_TYPE_DST_PLANE2, + REG_TYPE_PAT, + REG_TYPE_MSK, + MAX_REG_TYPE_NR +}; + /* cmdlist data structure */ struct g2d_cmdlist { u32 head; @@ -113,6 +144,42 @@ struct g2d_cmdlist { u32 last; /* last data offset */ }; +/* + * A structure of buffer description + * + * @format: color format + * @left_x: the x coordinates of left top corner + * @top_y: the y coordinates of left top corner + * @right_x: the x coordinates of right bottom corner + * @bottom_y: the y coordinates of right bottom corner + * + */ +struct g2d_buf_desc { + unsigned int format; + unsigned int left_x; + unsigned int top_y; + unsigned int right_x; + unsigned int bottom_y; +}; + +/* + * A structure of buffer information + * + * @map_nr: manages the number of mapped buffers + * @reg_types: stores regitster type in the order of requested command + * @handles: stores buffer handle in its reg_type position + * @types: stores buffer type in its reg_type position + * @descs: stores buffer description in its reg_type position + * + */ +struct 
g2d_buf_info { + unsigned int map_nr; + enum g2d_reg_type reg_types[MAX_REG_TYPE_NR]; + unsigned long handles[MAX_REG_TYPE_NR]; + unsigned int types[MAX_REG_TYPE_NR]; + struct g2d_buf_desc descs[MAX_REG_TYPE_NR]; +}; + struct drm_exynos_pending_g2d_event { struct drm_pending_event base; struct drm_exynos_g2d_event event; @@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr { bool in_pool; bool out_of_list; }; - struct g2d_cmdlist_node { struct list_head list; struct g2d_cmdlist *cmdlist; - unsigned int map_nr; - unsigned long handles[MAX_BUF_ADDR_NR]; - unsigned int obj_type[MAX_BUF_ADDR_NR]; dma_addr_t dma_addr; + struct g2d_buf_info buf_info; struct drm_exynos_pending_g2d_event *event; }; @@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) struct exynos_drm_subdrv *subdrv = &g2d->subdrv; int nr; int ret; + struct g2d_buf_info *buf_info; init_dma_attrs(&g2d->cmdlist_dma_attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); @@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) } for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { + unsigned int i; + node[nr].cmdlist = g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; node[nr].dma_addr = g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; + buf_info = &node[nr].buf_info; + for (i = 0; i < MAX_REG_TYPE_NR; i++) + buf_info->reg_types[i] = REG_TYPE_NONE; + list_add_tail(&node[nr].list, &g2d->free_cmdlist); } @@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, DMA_BIDIRECTIONAL); if (ret < 0) { DRM_ERROR("failed to map sgt with dma region.\n"); - goto err_free_sgt; + goto err_sg_free_table; } g2d_userptr->dma_addr = sgt->sgl[0].dma_address; @@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, return &g2d_userptr->dma_addr; -err_free_sgt: +err_sg_free_table: sg_free_table(sgt); + +err_free_sgt: kfree(sgt); sgt = NULL; @@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev, g2d->current_pool = 0; } +static enum g2d_reg_type g2d_get_reg_type(int reg_offset) +{ + enum g2d_reg_type reg_type; + + switch (reg_offset) { + case G2D_SRC_BASE_ADDR: + case G2D_SRC_COLOR_MODE: + case G2D_SRC_LEFT_TOP: + case G2D_SRC_RIGHT_BOTTOM: + reg_type = REG_TYPE_SRC; + break; + case G2D_SRC_PLANE2_BASE_ADDR: + reg_type = REG_TYPE_SRC_PLANE2; + break; + case G2D_DST_BASE_ADDR: + case G2D_DST_COLOR_MODE: + case G2D_DST_LEFT_TOP: + case G2D_DST_RIGHT_BOTTOM: + reg_type = REG_TYPE_DST; + break; + case G2D_DST_PLANE2_BASE_ADDR: + reg_type = REG_TYPE_DST_PLANE2; + break; + case G2D_PAT_BASE_ADDR: + reg_type = REG_TYPE_PAT; + break; + case G2D_MSK_BASE_ADDR: + reg_type = REG_TYPE_MSK; + break; + default: + reg_type = REG_TYPE_NONE; + DRM_ERROR("Unknown register offset![%d]\n", reg_offset); + break; + }; + + return reg_type; +} + +static unsigned long g2d_get_buf_bpp(unsigned int format) +{ + unsigned long bpp; + + switch (format) { + case G2D_FMT_XRGB8888: + case G2D_FMT_ARGB8888: + bpp = 4; + break; + case G2D_FMT_RGB565: + case G2D_FMT_XRGB1555: + case G2D_FMT_ARGB1555: + case G2D_FMT_XRGB4444: + case G2D_FMT_ARGB4444: + bpp = 2; + break; + case G2D_FMT_PACKED_RGB888: + bpp = 3; + break; + default: + bpp = 1; + break; + } + + return bpp; +} + +static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc, + enum g2d_reg_type reg_type, + unsigned long size) +{ + unsigned int width, height; + unsigned long area; + + /* + * check source and destination buffers only. + * so the others are always valid. 
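The body of g2d_check_buf_desc_is_valid() continues below; the idea is to reject a command list when width x height x bytes-per-pixel exceeds the backing buffer. A short worked example of that arithmetic, with illustrative numbers only:

/*
 * Worked example of the G2D buffer bounds check: a 1920x1080 XRGB8888
 * surface needs width * height * 4 bytes in the backing GEM object.
 */
#include <stdio.h>

int main(void)
{
	unsigned int left_x = 0, top_y = 0, right_x = 1920, bottom_y = 1080;
	unsigned long bpp = 4;				/* G2D_FMT_XRGB8888 */
	unsigned long buf_size = 8 * 1024 * 1024;	/* 8 MiB GEM buffer */

	unsigned long area = (unsigned long)(right_x - left_x) *
			     (unsigned long)(bottom_y - top_y) * bpp;

	printf("area %lu bytes, buffer %lu bytes: %s\n", area, buf_size,
	       area > buf_size ? "rejected" : "ok");
	/* 8294400 <= 8388608, so this request passes the size check */
	return 0;
}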
+ */ + if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST) + return true; + + width = buf_desc->right_x - buf_desc->left_x; + if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) { + DRM_ERROR("width[%u] is out of range!\n", width); + return false; + } + + height = buf_desc->bottom_y - buf_desc->top_y; + if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) { + DRM_ERROR("height[%u] is out of range!\n", height); + return false; + } + + area = (unsigned long)width * (unsigned long)height * + g2d_get_buf_bpp(buf_desc->format); + if (area > size) { + DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size); + return false; + } + + return true; +} + static int g2d_map_cmdlist_gem(struct g2d_data *g2d, struct g2d_cmdlist_node *node, struct drm_device *drm_dev, struct drm_file *file) { struct g2d_cmdlist *cmdlist = node->cmdlist; + struct g2d_buf_info *buf_info = &node->buf_info; int offset; + int ret; int i; - for (i = 0; i < node->map_nr; i++) { + for (i = 0; i < buf_info->map_nr; i++) { + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + int reg_pos; unsigned long handle; dma_addr_t *addr; - offset = cmdlist->last - (i * 2 + 1); - handle = cmdlist->data[offset]; + reg_pos = cmdlist->last - 2 * (i + 1); + + offset = cmdlist->data[reg_pos]; + handle = cmdlist->data[reg_pos + 1]; + + reg_type = g2d_get_reg_type(offset); + if (reg_type == REG_TYPE_NONE) { + ret = -EFAULT; + goto err; + } + + buf_desc = &buf_info->descs[reg_type]; + + if (buf_info->types[reg_type] == BUF_TYPE_GEM) { + unsigned long size; + + size = exynos_drm_gem_get_size(drm_dev, handle, file); + if (!size) { + ret = -EFAULT; + goto err; + } + + if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, + size)) { + ret = -EFAULT; + goto err; + } - if (node->obj_type[i] == BUF_TYPE_GEM) { addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file); if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; } } else { struct drm_exynos_g2d_userptr g2d_userptr; if (copy_from_user(&g2d_userptr, (void __user *)handle, sizeof(struct drm_exynos_g2d_userptr))) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; + } + + if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, + g2d_userptr.size)) { + ret = -EFAULT; + goto err; } addr = g2d_userptr_get_dma_addr(drm_dev, @@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d, file, &handle); if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; } } - cmdlist->data[offset] = *addr; - node->handles[i] = handle; + cmdlist->data[reg_pos + 1] = *addr; + buf_info->reg_types[i] = reg_type; + buf_info->handles[reg_type] = handle; } return 0; + +err: + buf_info->map_nr = i; + return ret; } static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, @@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, struct drm_file *filp) { struct exynos_drm_subdrv *subdrv = &g2d->subdrv; + struct g2d_buf_info *buf_info = &node->buf_info; int i; - for (i = 0; i < node->map_nr; i++) { - unsigned long handle = node->handles[i]; + for (i = 0; i < buf_info->map_nr; i++) { + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + unsigned long handle; + + reg_type = buf_info->reg_types[i]; + + buf_desc = &buf_info->descs[reg_type]; + handle = buf_info->handles[reg_type]; - if (node->obj_type[i] == BUF_TYPE_GEM) + if (buf_info->types[reg_type] == BUF_TYPE_GEM) exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, filp); else g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, false); - 
node->handles[i] = 0; + buf_info->reg_types[i] = REG_TYPE_NONE; + buf_info->handles[reg_type] = 0; + buf_info->types[reg_type] = 0; + memset(buf_desc, 0x00, sizeof(*buf_desc)); } - node->map_nr = 0; + buf_info->map_nr = 0; } static void g2d_dma_start(struct g2d_data *g2d, @@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d, pm_runtime_get_sync(g2d->dev); clk_enable(g2d->gate_clk); - /* interrupt enable */ - writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF, - g2d->regs + G2D_INTEN); - writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); } @@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work) struct g2d_data *g2d = container_of(work, struct g2d_data, runqueue_work); - mutex_lock(&g2d->runqueue_mutex); clk_disable(g2d->gate_clk); pm_runtime_put_sync(g2d->dev); @@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev, int i; for (i = 0; i < nr; i++) { - index = cmdlist->last - 2 * (i + 1); + struct g2d_buf_info *buf_info = &node->buf_info; + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + unsigned long value; - if (for_addr) { - /* check userptr buffer type. */ - reg_offset = (cmdlist->data[index] & - ~0x7fffffff) >> 31; - if (reg_offset) { - node->obj_type[i] = BUF_TYPE_USERPTR; - cmdlist->data[index] &= ~G2D_BUF_USERPTR; - } - } + index = cmdlist->last - 2 * (i + 1); reg_offset = cmdlist->data[index] & ~0xfffff000; - if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) goto err; if (reg_offset % 4) @@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev, if (!for_addr) goto err; - if (node->obj_type[i] != BUF_TYPE_USERPTR) - node->obj_type[i] = BUF_TYPE_GEM; + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + /* check userptr buffer type. */ + if ((cmdlist->data[index] & ~0x7fffffff) >> 31) { + buf_info->types[reg_type] = BUF_TYPE_USERPTR; + cmdlist->data[index] &= ~G2D_BUF_USERPTR; + } else + buf_info->types[reg_type] = BUF_TYPE_GEM; + break; + case G2D_SRC_COLOR_MODE: + case G2D_DST_COLOR_MODE: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->format = value & 0xf; + break; + case G2D_SRC_LEFT_TOP: + case G2D_DST_LEFT_TOP: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->left_x = value & 0x1fff; + buf_desc->top_y = (value & 0x1fff0000) >> 16; + break; + case G2D_SRC_RIGHT_BOTTOM: + case G2D_DST_RIGHT_BOTTOM: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->right_x = value & 0x1fff; + buf_desc->bottom_y = (value & 0x1fff0000) >> 16; break; default: if (for_addr) @@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; cmdlist->data[cmdlist->last++] = 0; + /* + * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG + * and GCF bit should be set to INTEN register if user wants + * G2D interrupt event once current command list execution is + * finished. 
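The G2D_SRC/DST_LEFT_TOP and RIGHT_BOTTOM parsing added above unpacks two 13-bit coordinates from one 32-bit register value (X in bits 0..12, Y in bits 16..28). A standalone model of that decoding; the sample value is made up for illustration.

/*
 * Model of the LEFT_TOP decoding used by g2d_check_reg_offset().
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t left_top = (64u << 16) | 128u;	/* y = 64, x = 128 */

	unsigned int left_x = left_top & 0x1fff;
	unsigned int top_y  = (left_top & 0x1fff0000) >> 16;

	printf("left_x = %u, top_y = %u\n", left_x, top_y);	/* 128, 64 */
	return 0;
}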
+ * Otherwise only ACF bit should be set to INTEN register so + * that one interrupt is occured after all command lists + * have been completed. + */ if (node->event) { + cmdlist->data[cmdlist->last++] = G2D_INTEN; + cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF; cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; + } else { + cmdlist->data[cmdlist->last++] = G2D_INTEN; + cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; } /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ @@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, if (ret < 0) goto err_free_event; - node->map_nr = req->cmd_buf_nr; + node->buf_info.map_nr = req->cmd_buf_nr; if (req->cmd_buf_nr) { struct drm_exynos_g2d_cmd *cmd_buf; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 67e17ce112b6..0e6fe000578c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -164,6 +164,27 @@ out: exynos_gem_obj = NULL; } +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, + unsigned int gem_handle, + struct drm_file *file_priv) +{ + struct exynos_drm_gem_obj *exynos_gem_obj; + struct drm_gem_object *obj; + + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object.\n"); + return 0; + } + + exynos_gem_obj = to_exynos_gem_obj(obj); + + drm_gem_object_unreference_unlocked(obj); + + return exynos_gem_obj->buffer->size; +} + + struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 35ebac47dc2b..468766bee450 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +/* get buffer size to gem handle. */ +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, + unsigned int gem_handle, + struct drm_file *file_priv); + /* initialize gem object. 
*/ int exynos_drm_gem_init_object(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 13ccbd4bcfaa..9504b0cd825a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev, } edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; - edid = kzalloc(edid_len, GFP_KERNEL); + edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); if (!edid) { DRM_DEBUG_KMS("failed to allocate edid\n"); return ERR_PTR(-ENOMEM); } - memcpy(edid, ctx->raw_edid, edid_len); return edid; } @@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, return -EINVAL; } edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; - ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); + ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL); if (!ctx->raw_edid) { DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); return -ENOMEM; } - memcpy(ctx->raw_edid, raw_edid, edid_len); } else { /* * with connection = 0, free raw_edid diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e919aba29b3d..2f4f72f07047 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win) mixer_ctx->win_data[win].enabled = false; } -int mixer_check_timing(void *ctx, struct fb_videomode *timing) +static int mixer_check_timing(void *ctx, struct fb_videomode *timing) { struct mixer_context *mixer_ctx = ctx; u32 w, h; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index aae31489c893..7299ea45dd03 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -103,7 +103,7 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0a8eceb75902..e9b57893db2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support, "Enable Haswell and ValleyView Support. 
" "(default: false)"); +int i915_disable_power_well __read_mostly = 0; +module_param_named(disable_power_well, i915_disable_power_well, int, 0600); +MODULE_PARM_DESC(disable_power_well, + "Disable the power well when possible (default: false)"); + static struct drm_driver driver; extern int intel_agp_enabled; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e95337c97459..01769e2a9953 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; +extern int i915_disable_power_well __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2f2daebd0eef..9a48e1a2d417 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) if (eb == NULL) { int size = args->buffer_count; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; - BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); + BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); while (count > 2*size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + @@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count) { int i; + int relocs_total = 0; + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; @@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) return -EINVAL; - /* First check for malicious input causing overflow */ - if (exec[i].relocation_count > - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + /* First check for malicious input causing overflow in + * the worst case where we need to allocate the entire + * relocation tree as a single array. 
+ */ + if (exec[i].relocation_count > relocs_max - relocs_total) return -EINVAL; + relocs_total += exec[i].relocation_count; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905ec..1ce45a0a2d3e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -45,6 +45,9 @@ struct intel_crt { struct intel_encoder base; + /* DPMS state is stored in the connector, which we need in the + * encoder's enable/disable callbacks */ + struct intel_connector *connector; bool force_hotplug_required; u32 adpa_reg; }; @@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, return true; } -static void intel_disable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 temp; - - temp = I915_READ(crt->adpa_reg); - temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; - temp &= ~ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); -} - -static void intel_enable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 temp; - - temp = I915_READ(crt->adpa_reg); - temp |= ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); -} - /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) I915_WRITE(crt->adpa_reg, temp); } +static void intel_disable_crt(struct intel_encoder *encoder) +{ + intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void intel_enable_crt(struct intel_encoder *encoder) +{ + struct intel_crt *crt = intel_encoder_to_crt(encoder); + + intel_crt_set_dpms(encoder, crt->connector->base.dpms); +} + + static void intel_crt_dpms(struct drm_connector *connector, int mode) { struct drm_device *dev = connector->dev; @@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev) } connector = &intel_connector->base; + crt->connector = intel_connector; drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 287b42c9d1a8..b20d50192fcc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, num_connectors++; } + if (is_cpu_edp) + intel_crtc->cpu_transcoder = TRANSCODER_EDP; + else + intel_crtc->cpu_transcoder = pipe; + /* We are not sure yet this won't happen. 
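The intel_crt refactor above keeps a back-pointer from the encoder to its connector so the enable hook can restore the DPMS level the connector last requested instead of hard-coding "fully on". A simplified, self-contained model of that design; the enum values and structs are stand-ins, not the i915 types.

/*
 * Model of routing enable/disable through a single set_dpms() helper that
 * consults the connector's stored DPMS state.
 */
#include <stdio.h>

enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

struct connector { enum dpms_mode dpms; };

struct crt_encoder {
	struct connector *connector;	/* back-pointer set at init time */
	enum dpms_mode hw_state;
};

static void crt_set_dpms(struct crt_encoder *crt, enum dpms_mode mode)
{
	crt->hw_state = mode;		/* real driver programs ADPA here */
	printf("ADPA programmed for mode %d\n", mode);
}

static void crt_disable(struct crt_encoder *crt)
{
	crt_set_dpms(crt, DPMS_OFF);
}

static void crt_enable(struct crt_encoder *crt)
{
	crt_set_dpms(crt, crt->connector->dpms);
}

int main(void)
{
	struct connector con = { .dpms = DPMS_STANDBY };
	struct crt_encoder crt = { .connector = &con };

	crt_enable(&crt);	/* restores STANDBY, not unconditionally ON */
	crt_disable(&crt);
	return 0;
}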
*/ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); @@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; int ret; - if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) - intel_crtc->cpu_transcoder = TRANSCODER_EDP; - else - intel_crtc->cpu_transcoder = pipe; - drm_vblank_pre_modeset(dev, pipe); ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6f728e5ee793..8fc93f90a7cd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct intel_link_m_n m_n; int pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; + int target_clock; /* * Find the lane count in the intel_encoder private @@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, } } + target_clock = mode->clock; + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + target_clock = intel_edp_target_clock(intel_encoder, + mode); + break; + } + } + /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. */ intel_link_compute_m_n(intel_crtc->bpp, lane_count, - mode->clock, adjusted_mode->clock, &m_n); + target_clock, adjusted_mode->clock, &m_n); if (IS_HASWELL(dev)) { I915_WRITE(PIPE_DATA_M1(cpu_transcoder), @@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count && voltage_tries == 5) { + if (i == intel_dp->lane_count) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); @@ -2549,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dp_to_dev(intel_dp); i2c_del_adapter(&intel_dp->adapter); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); + mutex_unlock(&dev->mode_config.mutex); } kfree(intel_dig_port); } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index acf8aec9ada7..ef4744e1bf0b 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->data = bus; } -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) +/* + * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI + * mode. This results in spurious interrupt warnings if the legacy irq no. is + * shared with another device. The kernel then disables that interrupt source + * and so prevents the other device from working properly. 
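Raising HAS_GMBUS_IRQ() to gen 5 only works because the wait path can cope without the interrupt; the following hunk clears gmbus4_irq_en when the macro is false, so the existing periodic wakeup does the waiting instead. A generic sketch of that capability-gated wait, assuming a hypothetical device with hw_has_irq, irq_done, and a poll_status() helper (none of these are the real i915 names):

#include <stdbool.h>
#include <unistd.h>

struct dev_state {
        bool hw_has_irq;        /* capability check, e.g. generation >= N */
        volatile bool irq_done; /* set from the interrupt handler */
        bool (*poll_status)(struct dev_state *); /* reads a status register */
};

/* Wait for completion, trusting the interrupt only where it is reliable;
 * otherwise fall back to periodic polling of the status register. */
static bool wait_for_done(struct dev_state *dev, int timeout_ms)
{
        int waited = 0;

        while (waited < timeout_ms) {
                if (dev->hw_has_irq && dev->irq_done)
                        return true;
                if (!dev->hw_has_irq && dev->poll_status(dev))
                        return true;
                usleep(1000);   /* 1 ms poll interval */
                waited++;
        }
        return false;   /* timed out */
}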
+ */ +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2_status, @@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2 = 0; DEFINE_WAIT(wait); + if (!HAS_GMBUS_IRQ(dev_priv->dev)) + gmbus4_irq_en = 0; + /* Important: The hw handles only the first bit, so set only one! Since * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a3730e0289e5..bee8cb6108a7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev, } set_level: - /* Check the current backlight level and try to set again if it's zero. - * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically - * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. */ - if (!intel_panel_get_backlight(dev)) - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a1794c6df1bf..adca00783e61 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable) if (!IS_HASWELL(dev)) return; + if (!i915_disable_power_well && !enable) + return; + tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE; enable_requested = tmp & HSW_PWR_WELL_ENABLE; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a274b9906ef8..78d8e919509f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) m = n = p = 0; vcomax = 800000; vcomin = 400000; - pllreffreq = 3333; + pllreffreq = 33333; delta = 0xffffffff; permitteddelta = clock * 5 / 1000; - for (testp = 16; testp > 0; testp--) { + for (testp = 16; testp > 0; testp >>= 1) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 33; testm++) { - for (testn = 1; testn < 257; testn++) { + for (testn = 17; testn < 257; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) @@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; - m = (testm - 1) | ((n >> 1) & 0x80); + m = (testm - 1); p = testp - 1; } if ((clock * testp) >= 600000) - p |= 80; + p |= 0x80; } } } @@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, int i; unsigned char misc = 0; unsigned char ext_vga[6]; - unsigned char 
ext_vga_index24; - unsigned char dac_index90 = 0; u8 bppshift; static unsigned char dacvalue[] = { @@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x0000b000; break; case G200_ER: - dac_index90 = 0; break; } @@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, WREG_DAC(i, dacvalue[i]); } - if (mdev->type == G200_ER) { - WREG_DAC(0x90, dac_index90); - } - + if (mdev->type == G200_ER) + WREG_DAC(0x90, 0); if (option) pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); @@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (mdev->type == G200_WB) ext_vga[1] |= 0x88; - ext_vga_index24 = 0x05; - /* Set pixel clocks */ misc = 0x2d; WREG8(MGA_MISC_OUT, misc); @@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, } if (mdev->type == G200_ER) - WREG_ECRT(24, ext_vga_index24); + WREG_ECRT(0x24, 0x5); if (mdev->type == G200_EV) { WREG_ECRT(6, 0); diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 0daab62ea14c..3b2e7b6304d3 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c @@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) struct nouveau_object *parent = NULL; struct nouveau_object *namedb = NULL; struct nouveau_handle *handle = NULL; - int ret = -EINVAL; parent = nouveau_handle_ref(client, _parent); if (!parent) @@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) } nouveau_object_ref(NULL, &parent); - return ret; + return handle ? 0 : -EINVAL; } int diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 5fa13267bd9f..02e369f80449 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -544,13 +544,13 @@ nv50_disp_curs_ofuncs = { static void nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) { - nv_mask(event->priv, 0x61002c, (1 << head), (1 << head)); + nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); } static void nv50_disp_base_vblank_disable(struct nouveau_event *event, int head) { - nv_mask(event->priv, 0x61002c, (1 << head), (0 << head)); + nv_mask(event->priv, 0x61002c, (4 << head), 0); } static int diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h index 6b17b614629f..0b20fc0d19c1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h @@ -4,7 +4,7 @@ #include <core/device.h> #include <core/subdev.h> -enum nouveau_therm_mode { +enum nouveau_therm_fan_mode { NOUVEAU_THERM_CTRL_NONE = 0, NOUVEAU_THERM_CTRL_MANUAL = 1, NOUVEAU_THERM_CTRL_AUTO = 2, diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a7..0e2c1a4f1659 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) } } +static void +nouveau_bios_shadow_platform(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + size_t size; + + void __iomem *rom = pci_platform_rom(pdev, &size); + if (rom && size) { + bios->data = kmalloc(size, GFP_KERNEL); + if (bios->data) { + memcpy_fromio(bios->data, rom, size); + bios->size = size; + } + } +} + static 
int nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) { @@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios) { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, + { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, {} }; struct methods *mthd, *best; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index f794dc89a3b2..a00a5a76e2d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm) } int -nouveau_therm_mode(struct nouveau_therm *therm, int mode) +nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) { struct nouveau_therm_priv *priv = (void *)therm; struct nouveau_device *device = nv_device(therm); @@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode) (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) return -EINVAL; + /* do not allow automatic fan management if the thermal sensor is + * not available */ + if (priv->mode == 2 && therm->temp_get(therm) < 0) + return -EINVAL; + if (priv->mode == mode) return 0; - nv_info(therm, "Thermal management: %s\n", name[mode]); + nv_info(therm, "fan management: %s\n", name[mode]); nouveau_therm_update(therm, mode); return 0; } @@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm, priv->fan->bios.max_duty = value; return 0; case NOUVEAU_THERM_ATTR_FAN_MODE: - return nouveau_therm_mode(therm, value); + return nouveau_therm_fan_mode(therm, value); case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: priv->bios_sensor.thrs_fan_boost.temp = value; priv->sensor.program_alarms(therm); @@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object) return ret; if (priv->suspend >= 0) - nouveau_therm_mode(therm, priv->mode); + nouveau_therm_fan_mode(therm, priv->mode); priv->sensor.program_alarms(therm); return 0; } @@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent, int nouveau_therm_preinit(struct nouveau_therm *therm) { - nouveau_therm_ic_ctor(therm); nouveau_therm_sensor_ctor(therm); + nouveau_therm_ic_ctor(therm); nouveau_therm_fan_ctor(therm); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_sensor_preinit(therm); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e24090bac195..8b3adec5fbb1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, struct i2c_board_info *info) { struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; struct i2c_client *client; request_module("%s%s", I2C_MODULE_PREFIX, info->type); @@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, } nv_info(priv, - "Found an %s at address 0x%x (controlled by lm_sensors)\n", - info->type, info->addr); + "Found an %s at address 0x%x (controlled by lm_sensors, " + "temp offset %+i C)\n", + info->type, info->addr, sensor->offset_constant); priv->ic = client; return true; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c index 
0f5363edb964..a70d1b7e397b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c @@ -29,54 +29,83 @@ struct nv40_therm_priv { struct nouveau_therm_priv base; }; +enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; + +static enum nv40_sensor_style +nv40_sensor_style(struct nouveau_therm *therm) +{ + struct nouveau_device *device = nv_device(therm); + + switch (device->chipset) { + case 0x43: + case 0x44: + case 0x4a: + case 0x47: + return OLD_STYLE; + + case 0x46: + case 0x49: + case 0x4b: + case 0x4e: + case 0x4c: + case 0x67: + case 0x68: + case 0x63: + return NEW_STYLE; + default: + return INVALID_STYLE; + } +} + static int nv40_sensor_setup(struct nouveau_therm *therm) { - struct nouveau_device *device = nv_device(therm); + enum nv40_sensor_style style = nv40_sensor_style(therm); /* enable ADC readout and disable the ALARM threshold */ - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_mask(therm, 0x15b8, 0x80000000, 0); nv_wr32(therm, 0x15b0, 0x80003fff); - mdelay(10); /* wait for the temperature to stabilize */ + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 0x15b4) & 0xff; - } + } else + return -ENODEV; } static int nv40_temp_get(struct nouveau_therm *therm) { struct nouveau_therm_priv *priv = (void *)therm; - struct nouveau_device *device = nv_device(therm); struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + enum nv40_sensor_style style = nv40_sensor_style(therm); int core_temp; - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_wr32(therm, 0x15b0, 0x80003fff); core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); core_temp = nv_rd32(therm, 0x15b4) & 0xff; - } - - /* Setup the sensor if the temperature is 0 */ - if (core_temp == 0) - core_temp = nv40_sensor_setup(therm); + } else + return -ENODEV; - if (sensor->slope_div == 0) - sensor->slope_div = 1; - if (sensor->offset_den == 0) - sensor->offset_den = 1; - if (sensor->slope_mult < 1) - sensor->slope_mult = 1; + /* if the slope or the offset is unset, do no use the sensor */ + if (!sensor->slope_div || !sensor->slope_mult || + !sensor->offset_num || !sensor->offset_den) + return -ENODEV; core_temp = core_temp * sensor->slope_mult / sensor->slope_div; core_temp = core_temp + sensor->offset_num / sensor->offset_den; core_temp = core_temp + sensor->offset_constant - 8; + /* reserve negative temperatures for errors */ + if (core_temp < 0) + core_temp = 0; + return core_temp; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h index 06b98706b3fc..438d9824b774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h @@ -102,7 +102,7 @@ struct nouveau_therm_priv { struct i2c_client *ic; }; -int nouveau_therm_mode(struct nouveau_therm *therm, int mode); +int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); int nouveau_therm_attr_get(struct nouveau_therm *therm, enum nouveau_therm_attr_type type); int nouveau_therm_attr_set(struct nouveau_therm *therm, @@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm); int nouveau_therm_preinit(struct nouveau_therm *); +void 
nouveau_therm_sensor_preinit(struct nouveau_therm *); void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, enum nouveau_therm_thrs thrs, enum nouveau_therm_thrs_state st); diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index b37624af8297..470f6a47b656 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm) { struct nouveau_therm_priv *priv = (void *)therm; - priv->bios_sensor.slope_mult = 1; - priv->bios_sensor.slope_div = 1; - priv->bios_sensor.offset_num = 0; - priv->bios_sensor.offset_den = 1; priv->bios_sensor.offset_constant = 0; priv->bios_sensor.thrs_fan_boost.temp = 90; @@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) struct nouveau_therm_priv *priv = (void *)therm; struct nvbios_therm_sensor *s = &priv->bios_sensor; - if (!priv->bios_sensor.slope_div) - priv->bios_sensor.slope_div = 1; - if (!priv->bios_sensor.offset_den) - priv->bios_sensor.offset_den = 1; - /* enforce a minimum hysteresis on thresholds */ s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); @@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, const char *thresolds[] = { "fanboost", "downclock", "critical", "shutdown" }; - uint8_t temperature = therm->temp_get(therm); + int temperature = therm->temp_get(therm); if (thrs < 0 || thrs > 3) return; if (dir == NOUVEAU_THERM_THRS_FALLING) - nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", + nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", temperature, thresolds[thrs]); else - nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", + nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", temperature, thresolds[thrs]); active = (dir == NOUVEAU_THERM_THRS_RISING); @@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, case NOUVEAU_THERM_THRS_FANBOOST: if (active) { nouveau_therm_fan_set(therm, true, 100); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); } break; case NOUVEAU_THERM_THRS_DOWNCLOCK: @@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm) NOUVEAU_THERM_THRS_SHUTDOWN); /* schedule the next poll in one second */ - if (list_empty(&alarm->head)) + if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); @@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) alarm_timer_callback(&priv->sensor.therm_poll_alarm); } +void +nouveau_therm_sensor_preinit(struct nouveau_therm *therm) +{ + const char *sensor_avail = "yes"; + + if (therm->temp_get(therm) < 0) + sensor_avail = "no"; + + nv_info(therm, "internal sensor: %s\n", sensor_avail); +} + int nouveau_therm_sensor_ctor(struct nouveau_therm *therm) { diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 41241922263f..5eb3e0da7c6e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -116,6 +116,11 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, { struct nouveau_abi16_ntfy *ntfy, *temp; + /* wait for all activity to stop before 
releasing notify object, which + * may be still in use */ + if (chan->chan && chan->ntfy) + nouveau_channel_idle(chan->chan); + /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); @@ -386,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan, *temp; + struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; struct nv_dma_class args = {}; @@ -399,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) return nouveau_abi16_put(abi16, -EINVAL); - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { - if (chan->chan->handle == (NVDRM_CHAN | info->channel)) + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { + chan = temp; break; - chan = NULL; + } } if (!chan) @@ -454,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_gpuobj_free *fini = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan, *temp; + struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; int ret; if (unlikely(!abi16)) return -ENOMEM; - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { - if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { + chan = temp; break; - chan = NULL; + } } if (!chan) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 11ca82148edc..7ff10711a4d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -801,7 +801,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, stride = 16 * 4; height = amount / stride; - if (new_mem->mem_type == TTM_PL_VRAM && + if (old_mem->mem_type == TTM_PL_VRAM && nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) @@ -823,7 +823,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); OUT_RING (chan, 1); } - if (old_mem->mem_type == TTM_PL_VRAM && + if (new_mem->mem_type == TTM_PL_VRAM && nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc1..c95decf543e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400); static struct drm_driver driver; static int +nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) +{ + struct nouveau_drm *drm = + container_of(event, struct nouveau_drm, vblank[head]); + drm_handle_vblank(drm->dev, head); + return NVKM_EVENT_KEEP; +} + +static int nouveau_drm_vblank_enable(struct drm_device *dev, int head) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_disp *pdisp = nouveau_disp(drm->device); - nouveau_event_get(pdisp->vblank, head, &drm->vblank); + + if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) + return -EIO; + WARN_ON_ONCE(drm->vblank[head].func); + drm->vblank[head].func = 
nouveau_drm_vblank_handler; + nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); return 0; } @@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_disp *pdisp = nouveau_disp(drm->device); - nouveau_event_put(pdisp->vblank, head, &drm->vblank); -} - -static int -nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) -{ - struct nouveau_drm *drm = - container_of(event, struct nouveau_drm, vblank); - drm_handle_vblank(drm->dev, head); - return NVKM_EVENT_KEEP; + if (drm->vblank[head].func) + nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); + else + WARN_ON_ONCE(1); + drm->vblank[head].func = NULL; } static u64 @@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) dev->dev_private = drm; drm->dev = dev; - drm->vblank.func = nouveau_drm_vblank_handler; INIT_LIST_HEAD(&drm->clients); spin_lock_init(&drm->tile.lock); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c901..9c39bafbef2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -113,7 +113,7 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct backlight_device *backlight; - struct nouveau_eventh vblank; + struct nouveau_eventh vblank[4]; /* power management */ struct nouveau_pm *pm; diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index bb54098c6d97..936b442a6ab7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_therm *therm = nouveau_therm(drm->device); + int temp = therm->temp_get(therm); - return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); + if (temp < 0) + return temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); @@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm1_max, nouveau_hwmon_set_pwm1_max, 0); -static struct attribute *hwmon_attributes[] = { +static struct attribute *hwmon_default_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_temp_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, @@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp1_emergency.dev_attr.attr, &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, - &sensor_dev_attr_name.dev_attr.attr, - &sensor_dev_attr_update_rate.dev_attr.attr, NULL }; static struct attribute *hwmon_fan_rpm_attributes[] = { @@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { NULL }; -static const struct attribute_group hwmon_attrgroup = { - .attrs = hwmon_attributes, +static const struct attribute_group hwmon_default_attrgroup = { + .attrs = hwmon_default_attributes, +}; +static const struct attribute_group hwmon_temp_attrgroup = { + .attrs = hwmon_temp_attributes, }; static const struct attribute_group hwmon_fan_rpm_attrgroup = { .attrs = 
hwmon_fan_rpm_attributes, @@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev) } dev_set_drvdata(hwmon_dev, dev); - /* default sysfs entries */ - ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); + /* set the default attributes */ + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); if (ret) { if (ret) goto error; } + /* if the card has a working thermal sensor */ + if (therm->temp_get(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); + if (ret) { + if (ret) + goto error; + } + } + /* if the card has a pwm fan */ /*XXX: incorrect, need better detection for this, some boards have * the gpio entries for pwm fan control even when there's no @@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev) struct nouveau_pm *pm = nouveau_pm(dev); if (pm->hwmon) { - sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_pwm_fan_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_fan_rpm_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); hwmon_device_unregister(pm->hwmon); } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 87a5a56ed358..1ddc03e51bf4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) { struct nv50_display_flip *flip = data; if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == - flip->chan->data); + flip->chan->data) return true; usleep_range(1, 2); return false; @@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, swap_interval <<= 4; if (swap_interval == 0) swap_interval |= 0x100; + if (chan == NULL) + evo_sync(crtc->dev); push = evo_wait(sync, 128); if (unlikely(push == NULL)) @@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, sync->addr ^= 0x10; sync->data++; FIRE_RING (chan); - } else { - evo_sync(crtc->dev); } /* queue the flip */ @@ -2276,6 +2276,7 @@ nv50_display_create(struct drm_device *dev) NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n", dcbe->location, dcbe->type, ffs(dcbe->or) - 1, ret); + ret = 0; } } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index d4c633e12863..27769e724b6d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x9907) || (rdev->pdev->device == 0x9908) || (rdev->pdev->device == 0x9909) || + (rdev->pdev->device == 0x990B) || + (rdev->pdev->device == 0x990C) || + (rdev->pdev->device == 0x990F) || (rdev->pdev->device == 0x9910) || - (rdev->pdev->device == 0x9917)) { + (rdev->pdev->device == 0x9917) || + (rdev->pdev->device == 0x9999)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || + (rdev->pdev->device == 0x990D) || + (rdev->pdev->device == 0x990E) || (rdev->pdev->device == 0x9913) || (rdev->pdev->device == 0x9918)) { rdev->config.cayman.max_simds_per_se = 4; @@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 
0x9990) || (rdev->pdev->device == 0x9991) || (rdev->pdev->device == 0x9994) || + (rdev->pdev->device == 0x9995) || + (rdev->pdev->device == 0x9996) || + (rdev->pdev->device == 0x999A) || (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; @@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, - rdev->config.cayman.max_backends_per_se * - rdev->config.cayman.max_shader_engines, - CAYMAN_MAX_BACKENDS, disabled_rb_mask); + if ((rdev->config.cayman.max_backends_per_se == 1) && + (rdev->flags & RADEON_IS_IGP)) { + if ((disabled_rb_mask & 3) == 1) { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; + } else { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, + rdev->config.cayman.max_backends_per_se * + rdev->config.cayman.max_shader_engines, + CAYMAN_MAX_BACKENDS, disabled_rb_mask); + } WREG32(GB_BACKEND_MAP, tmp); cgts_tcc_disable = 0xffff0000; @@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); evergreen_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index bedda9caadd9..6e05a2e75a46 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, goto out_cleanup; } - /* r100 doesn't have dma engine so skip the test */ - /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ - /* skip it as well if domains are the same */ - if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { + if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, RADEON_BENCHMARK_COPY_DMA, n); if (time < 0) @@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, sdomain, ddomain, "dma"); } - time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); - if (time < 0) - goto out_cleanup; - if (time > 0) - radeon_benchmark_log_results(n, size, time, - sdomain, ddomain, "blit"); + if (rdev->asic->copy.blit) { + time = radeon_benchmark_do_move(rdev, size, saddr, daddr, + RADEON_BENCHMARK_COPY_BLIT, n); + if (time < 0) + goto out_cleanup; + if (time > 0) + radeon_benchmark_log_results(n, size, time, + sdomain, ddomain, "blit"); + } out_cleanup: if (sobj) { diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d382..fa3c56fba294 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +static bool radeon_read_platform_bios(struct radeon_device *rdev) +{ + uint8_t __iomem *bios; + size_t size; + + rdev->bios = NULL; + + bios = pci_platform_rom(rdev->pdev, &size); + if (!bios) { + return false; + } + + if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + return false; + } + rdev->bios = kmemdup(bios, size, GFP_KERNEL); + if (rdev->bios == NULL) { + return 
false; + } + + return true; +} + #ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. @@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev) if (r == false) { r = radeon_read_disabled_bios(rdev); } + if (r == false) { + r = radeon_read_platform_bios(rdev); + } if (r == false || rdev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); rdev->bios = NULL; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 9128120da044..bafbe3216952 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { + radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); si_irq_suspend(rdev); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index fe5cdbcf2636..b44d548c56f8 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector) int ret; edid = (struct edid *)udl_get_edid(udl); + if (!edid) { + drm_mode_connector_update_edid_property(connector, NULL); + return 0; + } /* * We only read the main block, but if the monitor reports extension diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea7..aa341d135867 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, @@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev) hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) return true; break; + case USB_VENDOR_ID_ATMEL_V_USB: + /* Masterkit MA901 usb radio based on Atmel tiny85 chip and + * it has the same USB ID as many Atmel V-USB devices. This + * usb radio is handled by radio-ma901.c driver so we want + * ignore the hid. Check the name, bus, product and ignore + * if we have MA901 usb radio. 
+ */ + if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && + hdev->bus == BUS_USB && + strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) + return true; + break; } if (hdev->type == HID_TYPE_USBMOUSE && diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 92e47e5c9564..5309fd5eb0eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -158,6 +158,8 @@ #define USB_VENDOR_ID_ATMEL 0x03eb #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 +#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 +#define USB_DEVICE_ID_ATMEL_V_USB 0x05df #define USB_VENDOR_ID_AUREAL 0x0755 #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 @@ -557,9 +559,6 @@ #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 -#define USB_VENDOR_ID_MASTERKIT 0x16c0 -#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df - #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a @@ -590,6 +589,9 @@ #define USB_VENDOR_ID_MONTEREY 0x0566 #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 +#define USB_VENDOR_ID_MSI 0x1770 +#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00 + #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 #define USB_DEVICE_ID_N_S_HARMONY 0xc359 @@ -684,6 +686,9 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define USB_VENDOR_ID_REALTEK 0x0bda +#define USB_DEVICE_ID_REALTEK_READER 0x0152 + #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083e..a8ce44296cfd 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev, return 0; } +static void magicmouse_input_configured(struct hid_device *hdev, + struct hid_input *hi) + +{ + struct magicmouse_sc *msc = hid_get_drvdata(hdev); + + int ret = magicmouse_setup_input(msc->input, hdev); + if (ret) { + hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); + /* clean msc->input to notify probe() of the failure */ + msc->input = NULL; + } +} + + static int magicmouse_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev, goto err_free; } - /* We do this after hid-input is done parsing reports so that - * hid-input uses the most natural button and axis IDs. 
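The magicmouse change above moves the axis/button setup out of probe() and into an input_configured callback, and signals failure back to probe() by clearing the cached input pointer, since the callback returns void here. A reduced sketch of that hand-off, with placeholder types standing in for the HID structures:

#include <stddef.h>

struct my_input { int dummy; };

struct my_drvdata {
        struct my_input *input;   /* cached while reports are parsed */
};

static int setup_axes(struct my_input *in) { return 0; /* may fail */ }

/* Called once the generic layer has finished creating the input device.
 * A void callback cannot return an error, so failure is signalled by
 * clearing the cached pointer; probe() checks it afterwards. */
static void my_input_configured(struct my_drvdata *sc)
{
        if (setup_axes(sc->input))
                sc->input = NULL;
}

static int my_probe(struct my_drvdata *sc)
{
        /* ... the generic layer runs here and calls my_input_configured ... */
        if (!sc->input)
                return -1;      /* setup failed in the callback */
        return 0;
}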
- */ - if (msc->input) { - ret = magicmouse_setup_input(msc->input, hdev); - if (ret) { - hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); - goto err_stop_hw; - } + if (!msc->input) { + hid_err(hdev, "magicmouse input not registered\n"); + ret = -ENOMEM; + goto err_stop_hw; } if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) @@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = { .remove = magicmouse_remove, .raw_event = magicmouse_raw_event, .input_mapping = magicmouse_input_mapping, + .input_configured = magicmouse_input_configured, }; module_hid_driver(magicmouse_driver); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 7a1ebb867cf4..82e9211b3ca9 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, { struct mt_device *td = hid_get_drvdata(hid); __s32 quirks = td->mtclass.quirks; + struct input_dev *input = field->hidinput->input; if (hid->claimed & HID_CLAIMED_INPUT) { switch (usage->hid) { @@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, break; default: + if (usage->type) + input_event(input, usage->type, usage->code, + value); return; } if (usage->usage_index + 1 == field->report_count) { /* we only take into account the last report. */ if (usage->hid == td->last_slot_field) - mt_complete_slot(td, field->hidinput->input); + mt_complete_slot(td, input); if (field->index == td->last_field_index && td->num_received >= td->num_expected) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index e0e6abf1cd3b..19b8360f2330 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -73,6 +73,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, @@ -80,6 +81,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c index 41df29f59b0e..ebbb9f4f27a3 100644 --- a/drivers/hwmon/lineage-pem.c +++ b/drivers/hwmon/lineage-pem.c @@ -422,6 +422,7 @@ static struct attribute *pem_input_attributes[] = { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, + NULL }; static const struct attribute_group pem_input_group = { @@ -432,6 +433,7 @@ static struct attribute 
*pem_fan_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, + NULL }; static const struct attribute_group pem_fan_group = { diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index 668ff4721323..5cde94e56f17 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h @@ -25,7 +25,7 @@ which contains this code, we don't worry about the wasted space. */ -#include <linux/hwmon.h> +#include <linux/kernel.h> /* straight from the datasheet */ #define LM75_TEMP_MIN (-55000) diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c index a58de38e23d8..6d6130752f94 100644 --- a/drivers/hwmon/pmbus/ltc2978.c +++ b/drivers/hwmon/pmbus/ltc2978.c @@ -59,7 +59,7 @@ enum chips { ltc2978, ltc3880 }; struct ltc2978_data { enum chips id; int vin_min, vin_max; - int temp_min, temp_max; + int temp_min, temp_max[2]; int vout_min[8], vout_max[8]; int iout_max[2]; int temp2_max; @@ -113,9 +113,10 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page, ret = pmbus_read_word_data(client, page, LTC2978_MFR_TEMPERATURE_PEAK); if (ret >= 0) { - if (lin11_to_val(ret) > lin11_to_val(data->temp_max)) - data->temp_max = ret; - ret = data->temp_max; + if (lin11_to_val(ret) + > lin11_to_val(data->temp_max[page])) + data->temp_max[page] = ret; + ret = data->temp_max[page]; } break; case PMBUS_VIRT_RESET_VOUT_HISTORY: @@ -266,7 +267,7 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page, break; case PMBUS_VIRT_RESET_TEMP_HISTORY: data->temp_min = 0x7bff; - data->temp_max = 0x7c00; + data->temp_max[page] = 0x7c00; ret = ltc2978_clear_peaks(client, page, data->id); break; default: @@ -323,7 +324,8 @@ static int ltc2978_probe(struct i2c_client *client, data->vin_min = 0x7bff; data->vin_max = 0x7c00; data->temp_min = 0x7bff; - data->temp_max = 0x7c00; + for (i = 0; i < ARRAY_SIZE(data->temp_max); i++) + data->temp_max[i] = 0x7c00; data->temp2_max = 0x7c00; switch (data->id) { diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 80eef50c50fd..9add60920ac0 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -766,12 +766,14 @@ static ssize_t pmbus_show_label(struct device *dev, static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr) { if (data->num_attributes >= data->max_attributes - 1) { - data->max_attributes += PMBUS_ATTR_ALLOC_SIZE; - data->group.attrs = krealloc(data->group.attrs, - sizeof(struct attribute *) * - data->max_attributes, GFP_KERNEL); - if (data->group.attrs == NULL) + int new_max_attrs = data->max_attributes + PMBUS_ATTR_ALLOC_SIZE; + void *new_attrs = krealloc(data->group.attrs, + new_max_attrs * sizeof(void *), + GFP_KERNEL); + if (!new_attrs) return -ENOMEM; + data->group.attrs = new_attrs; + data->max_attributes = new_max_attrs; } data->group.attrs[data->num_attributes++] = attr; diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba4..461a0d739d75 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "%s: can't power on device\n", __func__); + pm_runtime_put_noidle(dev); + module_put(dev->driver->owner); return ret; } diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 46cde098c11c..e380c6eef3af 100644 --- 
a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -4,7 +4,6 @@ menuconfig I2C tristate "I2C support" - depends on !S390 select RT_MUTEXES ---help--- I2C (pronounce: I-squared-C) is a slow serial bus protocol used in @@ -76,6 +75,7 @@ config I2C_HELPER_AUTO config I2C_SMBUS tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO + depends on GENERIC_HARDIRQS help Say Y here if you want support for SMBus extensions to the I2C specification. At the moment, the only supported extension is diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a3725de92384..adfee98486b1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -114,7 +114,7 @@ config I2C_I801 config I2C_ISCH tristate "Intel SCH SMBus 1.0" - depends on PCI + depends on PCI && GENERIC_HARDIRQS select LPC_SCH help Say Y here if you want to use SMBus controller on the Intel SCH @@ -543,6 +543,7 @@ config I2C_NUC900 config I2C_OCORES tristate "OpenCores I2C Controller" + depends on GENERIC_HARDIRQS help If you say yes to this option, support will be included for the OpenCores I2C controller. For details see @@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C config I2C_PARPORT tristate "Parallel port adapter" - depends on PARPORT + depends on PARPORT && GENERIC_HARDIRQS select I2C_ALGOBIT select I2C_SMBUS help @@ -802,6 +803,7 @@ config I2C_PARPORT config I2C_PARPORT_LIGHT tristate "Parallel port adapter (light)" + depends on GENERIC_HARDIRQS select I2C_ALGOBIT select I2C_SMBUS help diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f65..e3085c487ace 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev) adap->algo = &i2c_dw_algo; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); r = i2c_add_numbered_adapter(adap); if (r) { diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index e9205ee8cf94..130f02cc9d94 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -80,6 +80,7 @@ /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a +#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ @@ -185,6 +186,7 @@ struct ismt_priv { static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 36704e3ab3fa..b714776b6ddd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; u32 clk_divisor; - tegra_i2c_clock_enable(i2c_dev); + err = tegra_i2c_clock_enable(i2c_dev); + if (err < 0) { + dev_err(i2c_dev->dev, "Clock enable failed %d\n", err); + return err; + } tegra_periph_reset_assert(i2c_dev->div_clk); udelay(2); @@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (i2c_dev->is_suspended) 
return -EBUSY; - tegra_i2c_clock_enable(i2c_dev); + ret = tegra_i2c_clock_enable(i2c_dev); + if (ret < 0) { + dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret); + return ret; + } + for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP; if (i < (num - 1)) { diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index f3b8f9a6a89b..966a18a5d12d 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -3,7 +3,7 @@ * * Copyright (c) 2010 Ericsson AB. * - * Author: Guenter Roeck <guenter.roeck@ericsson.com> + * Author: Guenter Roeck <linux@roeck-us.net> * * Derived from: * pca954x.c diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d6675013864..1a38dd7dfe4e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3c, idle_cpu_hsw), ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), + ICPU(0x46, idle_cpu_hsw), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 0198324a8b0c..bd33473f8e38 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -62,7 +62,7 @@ st_sensors_match_odr_error: int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) { int err; - struct st_sensor_odr_avl odr_out; + struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); err = st_sensors_match_odr(sdata->sensor, odr, &odr_out); @@ -114,7 +114,7 @@ st_sensors_match_odr_error: static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs) { - int err, i; + int err, i = 0; struct st_sensor_data *sdata = iio_priv(indio_dev); err = st_sensors_match_fs(sdata->sensor, fs, &i); @@ -139,14 +139,13 @@ st_accel_set_fullscale_error: int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable) { - bool found; u8 tmp_value; int err = -EINVAL; - struct st_sensor_odr_avl odr_out; + bool found = false; + struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); if (enable) { - found = false; tmp_value = sdata->sensor->pw.value_on; if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) && (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) { diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 2fe1d4edcb2f..74f2d52795f6 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -27,7 +27,6 @@ #define AD5064_ADDR(x) ((x) << 20) #define AD5064_CMD(x) ((x) << 24) -#define AD5064_ADDR_DAC(chan) (chan) #define AD5064_ADDR_ALL_DAC 0xF #define AD5064_CMD_WRITE_INPUT_N 0x0 @@ -131,15 +130,15 @@ static int ad5064_write(struct ad5064_state *st, unsigned int cmd, } static int ad5064_sync_powerdown_mode(struct ad5064_state *st, - unsigned int channel) + const struct iio_chan_spec *chan) { unsigned int val; int ret; - val = (0x1 << channel); + val = (0x1 << chan->address); - if (st->pwr_down[channel]) - val |= st->pwr_down_mode[channel] << 8; + if (st->pwr_down[chan->channel]) + val |= st->pwr_down_mode[chan->channel] << 8; ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0); @@ -169,7 +168,7 @@ static int ad5064_set_powerdown_mode(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); st->pwr_down_mode[chan->channel] = mode + 1; - ret = ad5064_sync_powerdown_mode(st, chan->channel); + ret = ad5064_sync_powerdown_mode(st, 
chan); mutex_unlock(&indio_dev->mlock); return ret; @@ -205,7 +204,7 @@ static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); st->pwr_down[chan->channel] = pwr_down; - ret = ad5064_sync_powerdown_mode(st, chan->channel); + ret = ad5064_sync_powerdown_mode(st, chan); mutex_unlock(&indio_dev->mlock); return ret ? ret : len; } @@ -258,7 +257,7 @@ static int ad5064_write_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - if (val > (1 << chan->scan_type.realbits) || val < 0) + if (val >= (1 << chan->scan_type.realbits) || val < 0) return -EINVAL; mutex_lock(&indio_dev->mlock); @@ -292,34 +291,44 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { { }, }; -#define AD5064_CHANNEL(chan, bits) { \ +#define AD5064_CHANNEL(chan, addr, bits) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .channel = (chan), \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ - .address = AD5064_ADDR_DAC(chan), \ + .address = addr, \ .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \ .ext_info = ad5064_ext_info, \ } #define DECLARE_AD5064_CHANNELS(name, bits) \ const struct iio_chan_spec name[] = { \ - AD5064_CHANNEL(0, bits), \ - AD5064_CHANNEL(1, bits), \ - AD5064_CHANNEL(2, bits), \ - AD5064_CHANNEL(3, bits), \ - AD5064_CHANNEL(4, bits), \ - AD5064_CHANNEL(5, bits), \ - AD5064_CHANNEL(6, bits), \ - AD5064_CHANNEL(7, bits), \ + AD5064_CHANNEL(0, 0, bits), \ + AD5064_CHANNEL(1, 1, bits), \ + AD5064_CHANNEL(2, 2, bits), \ + AD5064_CHANNEL(3, 3, bits), \ + AD5064_CHANNEL(4, 4, bits), \ + AD5064_CHANNEL(5, 5, bits), \ + AD5064_CHANNEL(6, 6, bits), \ + AD5064_CHANNEL(7, 7, bits), \ +} + +#define DECLARE_AD5065_CHANNELS(name, bits) \ +const struct iio_chan_spec name[] = { \ + AD5064_CHANNEL(0, 0, bits), \ + AD5064_CHANNEL(1, 3, bits), \ } static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); +static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); +static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); +static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); + static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { [ID_AD5024] = { .shared_vref = false, @@ -328,7 +337,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { }, [ID_AD5025] = { .shared_vref = false, - .channels = ad5024_channels, + .channels = ad5025_channels, .num_channels = 2, }, [ID_AD5044] = { @@ -338,7 +347,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { }, [ID_AD5045] = { .shared_vref = false, - .channels = ad5044_channels, + .channels = ad5045_channels, .num_channels = 2, }, [ID_AD5064] = { @@ -353,7 +362,7 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { }, [ID_AD5065] = { .shared_vref = false, - .channels = ad5064_channels, + .channels = ad5065_channels, .num_channels = 2, }, [ID_AD5628_1] = { @@ -429,6 +438,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type, { struct iio_dev *indio_dev; struct ad5064_state *st; + unsigned int midscale; unsigned int i; int ret; @@ -465,11 +475,6 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type, goto error_free_reg; } - for (i = 0; i < st->chip_info->num_channels; ++i) { - st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K; - st->dac_cache[i] = 0x8000; - } - indio_dev->dev.parent = dev; indio_dev->name = name; indio_dev->info = &ad5064_info; @@ -477,6 +482,13 @@ static int ad5064_probe(struct device *dev, 
enum ad5064_type type, indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; + midscale = (1 << indio_dev->channels[0].scan_type.realbits) / 2; + + for (i = 0; i < st->chip_info->num_channels; ++i) { + st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K; + st->dac_cache[i] = midscale; + } + ret = iio_device_register(indio_dev); if (ret) goto error_disable_reg; diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index b5cfa3a354cf..361b2328453d 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -5,6 +5,7 @@ config INV_MPU6050_IIO tristate "Invensense MPU6050 devices" depends on I2C && SYSFS + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help This driver supports the Invensense MPU6050 devices. diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 565bfb161c1a..a3fde52840ca 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep) neigh = dst_neigh_lookup(ep->dst, &ep->com.cm_id->remote_addr.sin_addr.s_addr); + if (!neigh) { + pr_err("%s - cannot alloc neigh.\n", __func__); + err = -ENOMEM; + goto fail4; + } + /* get a l2t entry */ if (neigh->dev->flags & IFF_LOOPBACK) { PDBG("%s LOOPBACK\n", __func__); @@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) dst = &rt->dst; neigh = dst_neigh_lookup_skb(dst, skb); + if (!neigh) { + pr_err("%s - failed to allocate neigh!\n", + __func__); + goto free_dst; + } + if (neigh->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, iph->daddr); e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 17ba4f8bc12d..70b1808a08f4 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), wq->rq.memsize, &(wq->rq.dma_addr), GFP_KERNEL); - if (!wq->rq.queue) + if (!wq->rq.queue) { + ret = -ENOMEM; goto free_sq; + } PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", __func__, wq->sq.queue, (unsigned long long)virt_to_phys(wq->sq.queue), diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 439c35d4a669..ea93870266eb 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, goto bail; } - opcode = be32_to_cpu(ohdr->bth[0]) >> 24; + opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; dev->opstats[opcode].n_bytes += tlen; dev->opstats[opcode].n_packets++; diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index e0d79b2395e4..add98d01476c 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -362,7 +362,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev) INIT_LIST_HEAD(&dev->sriov.cm_list); dev->sriov.sl_id_map = RB_ROOT; idr_init(&dev->sriov.pv_id_table); - idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL); } /* slave = -1 ==> all slaves */ diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 8349f9c5064c..1e603a375069 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig @@ -1,7 +1,7 @@ config INFINIBAND_QIB - tristate 
"QLogic PCIe HCA support" + tristate "Intel PCIe HCA support" depends on 64BIT ---help--- - This is a low-level driver for QLogic PCIe QLE InfiniBand host - channel adapters. This driver does not support the QLogic + This is a low-level driver for Intel PCIe QLE InfiniBand host + channel adapters. This driver does not support the Intel HyperTransport card (model QHT7140). diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 5423edcab51f..216092477dfc 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * @@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate, "Attempt pre-IBTA 1.2 DDR speed negotiation"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("QLogic <support@qlogic.com>"); -MODULE_DESCRIPTION("QLogic IB driver"); +MODULE_AUTHOR("Intel <ibsupport@intel.com>"); +MODULE_DESCRIPTION("Intel IB driver"); MODULE_VERSION(QIB_DRIVER_VERSION); /* diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a099ac171e22..0232ae56b1fa 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. @@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64); /* * This file contains all the chip-specific register information and - * access functions for the QLogic QLogic_IB PCI-Express chip. + * access functions for the Intel Intel_IB PCI-Express chip. * */ diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 50e33aa0b4e3..173f805790da 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 
* @@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd) static void qib_remove_one(struct pci_dev *); static int qib_init_one(struct pci_dev *, const struct pci_device_id *); -#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " +#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " #define PFX QIB_DRV_NAME ": " static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { @@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dd = qib_init_iba6120_funcs(pdev, ent); #else qib_early_err(&pdev->dev, - "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", + "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", ent->device); dd = ERR_PTR(-ENODEV); #endif @@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) default: qib_early_err(&pdev->dev, - "Failing on unknown QLogic deviceid 0x%x\n", + "Failing on unknown Intel deviceid 0x%x\n", ent->device); ret = -ENODEV; } diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 50a8a0d4fe67..911205d3d5a0 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index ba51a4715a1d..7c0ab16a2fe2 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
* @@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->dma_ops = &qib_dma_mapping_ops; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), - "QLogic Infiniband HCA %s", init_utsname()->nodename); + "Intel Infiniband HCA %s", init_utsname()->nodename); ret = ib_register_device(ibdev, qib_create_port_files); if (ret) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 67b0c1d23678..1ef880de3a41 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); - if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) - ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); + rc = ib_req_notify_cq(priv->send_cq, + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); + if (rc < 0) + ipoib_warn(priv, "request notify on send CQ failed\n"); + else if (rc) + ipoib_send_comp_handler(priv->send_cq, dev); } } } diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 7cd74e29cbc8..9135606c8649 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -158,14 +158,10 @@ static unsigned int get_time_pit(void) #define GET_TIME(x) rdtscl(x) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "TSC" -#elif defined(__alpha__) +#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x,y) ((y)-(x)) -#define TIME_NAME "PCC" -#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) -#define GET_TIME(x) do { x = get_cycles(); } while (0) -#define DELTA(x, y) ((x) - (y)) -#define TIME_NAME "TSC" +#define TIME_NAME "get_cycles" #else #define FAKE_TIME static unsigned long analog_faketime = 0; diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 2fb0d76a04c4..208de7cbb7fa 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c @@ -70,8 +70,6 @@ #define TC3589x_EVT_INT_CLR 0x2 #define TC3589x_KBD_INT_CLR 0x1 -#define TC3589x_KBD_KEYMAP_SIZE 64 - /** * struct tc_keypad - data structure used by keypad driver * @tc3589x: pointer to tc35893 @@ -88,7 +86,7 @@ struct tc_keypad { const struct tc3589x_keypad_platform_data *board; unsigned int krow; unsigned int kcol; - unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE]; + unsigned short *keymap; bool keypad_stopped; }; @@ -338,12 +336,14 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) error = matrix_keypad_build_keymap(plat->keymap_data, NULL, TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL, - keypad->keymap, input); + NULL, input); if (error) { dev_err(&pdev->dev, "Failed to build keymap\n"); goto err_free_mem; } + keypad->keymap = input->keycode; + input_set_capability(input, EV_MSC, MSC_SCAN); if (!plat->no_autorepeat) __set_bit(EV_REP, input->evbit); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 7b99fc7c9438..0238e0e14335 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -490,6 +490,29 @@ static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p) f->y_map |= (p[5] & 0x20) << 6; } +static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p) +{ + f->first_mp = !!(p[0] & 0x02); + f->is_mp = 
!!(p[0] & 0x20); + + f->fingers = ((p[0] & 0x6) >> 1 | + (p[0] & 0x10) >> 2); + f->x_map = ((p[2] & 0x60) >> 5) | + ((p[4] & 0x7f) << 2) | + ((p[5] & 0x7f) << 9) | + ((p[3] & 0x07) << 16) | + ((p[3] & 0x70) << 15) | + ((p[0] & 0x01) << 22); + f->y_map = (p[1] & 0x7f) | + ((p[2] & 0x1f) << 7); + + f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)); + f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3)); + f->z = (p[0] & 4) ? 0 : p[5] & 0x7f; + + alps_decode_buttons_v3(f, p); +} + static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; @@ -874,7 +897,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) } /* Bytes 2 - pktsize should have 0 in the highest bit */ - if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && + if (priv->proto_version != ALPS_PROTO_V5 && + psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", psmouse->pktcnt - 1, @@ -994,8 +1018,7 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command, return 0; } -static int alps_enter_command_mode(struct psmouse *psmouse, - unsigned char *resp) +static int alps_enter_command_mode(struct psmouse *psmouse) { unsigned char param[4]; @@ -1004,14 +1027,12 @@ static int alps_enter_command_mode(struct psmouse *psmouse, return -1; } - if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) { + if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) && + param[0] != 0x73) { psmouse_dbg(psmouse, "unknown response while entering command mode\n"); return -1; } - - if (resp) - *resp = param[2]; return 0; } @@ -1176,7 +1197,7 @@ static int alps_passthrough_mode_v3(struct psmouse *psmouse, { int reg_val, ret = -1; - if (alps_enter_command_mode(psmouse, NULL)) + if (alps_enter_command_mode(psmouse)) return -1; reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008); @@ -1216,7 +1237,7 @@ static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base) { int ret = -EIO, reg_val; - if (alps_enter_command_mode(psmouse, NULL)) + if (alps_enter_command_mode(psmouse)) goto error; reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08); @@ -1279,7 +1300,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base) * supported by this driver. If bit 1 isn't set the packet * format is different. 
*/ - if (alps_enter_command_mode(psmouse, NULL) || + if (alps_enter_command_mode(psmouse) || alps_command_mode_write_reg(psmouse, reg_base + 0x08, 0x82) || alps_exit_command_mode(psmouse)) @@ -1306,7 +1327,7 @@ static int alps_hw_init_v3(struct psmouse *psmouse) alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO) goto error; - if (alps_enter_command_mode(psmouse, NULL) || + if (alps_enter_command_mode(psmouse) || alps_absolute_mode_v3(psmouse)) { psmouse_err(psmouse, "Failed to enter absolute mode\n"); goto error; @@ -1381,7 +1402,7 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse) priv->flags &= ~ALPS_DUALPOINT; } - if (alps_enter_command_mode(psmouse, NULL) || + if (alps_enter_command_mode(psmouse) || alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 || alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00)) goto error; @@ -1431,7 +1452,7 @@ static int alps_hw_init_v4(struct psmouse *psmouse) struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; - if (alps_enter_command_mode(psmouse, NULL)) + if (alps_enter_command_mode(psmouse)) goto error; if (alps_absolute_mode_v4(psmouse)) { @@ -1499,6 +1520,23 @@ error: return -1; } +static int alps_hw_init_dolphin_v1(struct psmouse *psmouse) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + unsigned char param[2]; + + /* This is dolphin "v1" as empirically defined by florin9doi */ + param[0] = 0x64; + param[1] = 0x28; + + if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || + ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || + ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) + return -1; + + return 0; +} + static void alps_set_defaults(struct alps_data *priv) { priv->byte0 = 0x8f; @@ -1532,6 +1570,21 @@ static void alps_set_defaults(struct alps_data *priv) priv->nibble_commands = alps_v4_nibble_commands; priv->addr_command = PSMOUSE_CMD_DISABLE; break; + case ALPS_PROTO_V5: + priv->hw_init = alps_hw_init_dolphin_v1; + priv->process_packet = alps_process_packet_v3; + priv->decode_fields = alps_decode_dolphin; + priv->set_abs_params = alps_set_abs_params_mt; + priv->nibble_commands = alps_v3_nibble_commands; + priv->addr_command = PSMOUSE_CMD_RESET_WRAP; + priv->byte0 = 0xc8; + priv->mask0 = 0xc8; + priv->flags = 0; + priv->x_max = 1360; + priv->y_max = 660; + priv->x_bits = 23; + priv->y_bits = 12; + break; } } @@ -1592,6 +1645,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) if (alps_match_table(psmouse, priv, e7, ec) == 0) { return 0; + } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && + ec[0] == 0x73 && ec[1] == 0x01) { + priv->proto_version = ALPS_PROTO_V5; + alps_set_defaults(priv); + + return 0; } else if (ec[0] == 0x88 && ec[1] == 0x08) { priv->proto_version = ALPS_PROTO_V3; alps_set_defaults(priv); diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 970480551b6e..eee59853b9ce 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -16,6 +16,7 @@ #define ALPS_PROTO_V2 2 #define ALPS_PROTO_V3 3 #define ALPS_PROTO_V4 4 +#define ALPS_PROTO_V5 5 /** * struct alps_model_info - touchpad ID table diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index 1673dc6c8092..f51765fff054 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c @@ -236,6 +236,13 @@ static int cypress_read_fw_version(struct psmouse *psmouse) cytp->fw_version = param[2] & FW_VERSION_MASX; cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ?
1 : 0; + /* + * Trackpad fw_version 11 (in Dell XPS12) yields a bogus response to + * CYTP_CMD_READ_TP_METRICS so do not try to use it. LP: #1103594. + */ + if (cytp->fw_version >= 11) + cytp->tp_metrics_supported = 0; + psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version); psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n", cytp->tp_metrics_supported); @@ -258,6 +265,9 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse) cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width; cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high; + if (!cytp->tp_metrics_supported) + return 0; + memset(param, 0, sizeof(param)); if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) { /* Update trackpad parameters. */ @@ -315,18 +325,15 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse) static int cypress_query_hardware(struct psmouse *psmouse) { - struct cytp_data *cytp = psmouse->private; int ret; ret = cypress_read_fw_version(psmouse); if (ret) return ret; - if (cytp->tp_metrics_supported) { - ret = cypress_read_tp_metrics(psmouse); - if (ret) - return ret; - } + ret = cypress_read_tp_metrics(psmouse); + if (ret) + return ret; return 0; } diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 41b6fbf60112..0bfd8cf25200 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x802: /* Intuos4 General Pen */ case 0x804: /* Intuos4 Marker Pen */ case 0x40802: /* Intuos4 Classic Pen */ - case 0x18803: /* DTH2242 Grip Pen */ + case 0x18802: /* DTH2242 Grip Pen */ case 0x022: wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB = { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBC = - { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, + { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x26 = { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, @@ -2017,6 +2017,9 @@ static const struct wacom_features wacom_features_0x100 = static const struct wacom_features wacom_features_0x101 = { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x10D = + { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, + 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x4001 = { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2141,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, { USB_DEVICE_WACOM(0x59) }, - { USB_DEVICE_WACOM(0x5D) }, + { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2201,11 +2204,12 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, + { USB_DEVICE_WACOM(0x10D) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, { USB_DEVICE_WACOM(0xF8) }, - { USB_DEVICE_WACOM(0xF6) }, + { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 
0, 0) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 4f702b3ec1a3..434c3df250ca 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -236,7 +236,12 @@ static void __ads7846_disable(struct ads7846 *ts) /* Must be called with ts->lock held */ static void __ads7846_enable(struct ads7846 *ts) { - regulator_enable(ts->reg); + int error; + + error = regulator_enable(ts->reg); + if (error != 0) + dev_err(&ts->spi->dev, "Failed to enable supply: %d\n", error); + ads7846_restart(ts); } diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 4a29ddf6bf1e..1443532fe6c4 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -314,15 +314,27 @@ static int mms114_start(struct mms114_data *data) struct i2c_client *client = data->client; int error; - if (data->core_reg) - regulator_enable(data->core_reg); - if (data->io_reg) - regulator_enable(data->io_reg); + error = regulator_enable(data->core_reg); + if (error) { + dev_err(&client->dev, "Failed to enable avdd: %d\n", error); + return error; + } + + error = regulator_enable(data->io_reg); + if (error) { + dev_err(&client->dev, "Failed to enable vdd: %d\n", error); + regulator_disable(data->core_reg); + return error; + } + mdelay(MMS114_POWERON_DELAY); error = mms114_setup_regs(data); - if (error < 0) + if (error < 0) { + regulator_disable(data->io_reg); + regulator_disable(data->core_reg); return error; + } if (data->pdata->cfg_pin) data->pdata->cfg_pin(true); @@ -335,16 +347,20 @@ static int mms114_start(struct mms114_data *data) static void mms114_stop(struct mms114_data *data) { struct i2c_client *client = data->client; + int error; disable_irq(client->irq); if (data->pdata->cfg_pin) data->pdata->cfg_pin(false); - if (data->io_reg) - regulator_disable(data->io_reg); - if (data->core_reg) - regulator_disable(data->core_reg); + error = regulator_disable(data->io_reg); + if (error) + dev_warn(&client->dev, "Failed to disable vdd: %d\n", error); + + error = regulator_disable(data->core_reg); + if (error) + dev_warn(&client->dev, "Failed to disable avdd: %d\n", error); } static int mms114_input_open(struct input_dev *dev) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 5c514d0711d1..c332fb98480d 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -130,7 +130,7 @@ config IRQ_REMAP # OMAP IOMMU support config OMAP_IOMMU bool "OMAP IOMMU Support" - depends on ARCH_OMAP + depends on ARCH_OMAP2PLUS select IOMMU_API config OMAP_IOVMM diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e046d7acca84..830183737b0f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb, /* allocate a protection domain if a device is added */ dma_domain = find_protection_domain(devid); - if (dma_domain) - goto out; - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) - goto out; - dma_domain->target_dev = devid; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - list_add_tail(&dma_domain->list, &iommu_pd_list); - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - - dev_data = get_dev_data(dev); + if (!dma_domain) { + dma_domain = dma_ops_domain_alloc(); + if (!dma_domain) + goto out; + dma_domain->target_dev = devid; + + spin_lock_irqsave(&iommu_pd_list_lock, flags); + list_add_tail(&dma_domain->list, 
&iommu_pd_list); + spin_unlock_irqrestore(&iommu_pd_list_lock, flags); + } dev->archdata.dma_ops = &amd_iommu_dma_ops; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 551768af3fe7..2f46881256a2 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -980,7 +980,7 @@ static void __init free_iommu_all(void) * BIOS should disable L2B micellaneous clock gating by setting * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b */ -static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) +static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) { u32 value; diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index d56f8c17c5fe..7c11ff368d07 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -2,7 +2,6 @@ #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/string.h> -#include <linux/cpumask.h> #include <linux/errno.h> #include <linux/msi.h> #include <linux/irq.h> diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 644d72468423..fc6aebf1e4b2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d) if (gic_arch_extn.irq_retrigger) return gic_arch_extn.irq_retrigger(d); - return -ENXIO; + /* the genirq layer expects 0 if we can't retrigger in hardware */ + return 0; } #ifdef CONFIG_SMP @@ -648,7 +649,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) /* Convert our logical CPU mask into a physical one. */ for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); + map |= gic_cpu_map[cpu]; /* * Ensure that stores to Normal memory are visible to the diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig index 5313c9ea44dc..d9edcc94c2a8 100644 --- a/drivers/isdn/hisax/Kconfig +++ b/drivers/isdn/hisax/Kconfig @@ -237,7 +237,8 @@ config HISAX_MIC config HISAX_NETJET bool "NETjet card" - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on VIRT_TO_BUS help This enables HiSax support for the NetJet from Traverse Technologies. @@ -248,7 +249,8 @@ config HISAX_NETJET config HISAX_NETJET_U bool "NETspider U card" - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) + depends on VIRT_TO_BUS help This enables HiSax support for the Netspider U interface ISDN card from Traverse Technologies. 
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index d8a7d8323414..ebaebdf30f98 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg) int j; int l; - l = strlen(msg); + l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg) + + sizeof(cmd.parm.cmsg.para) - 2); + if (!l) { isdn_tty_modem_result(RESULT_ERROR, info); return; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 3c955e10a618..c6083132c4b8 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, { struct blk_plug plug; + BUG_ON(dm_bufio_in_request()); + blk_start_plug(&plug); dm_bufio_lock(c); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index fbd3625f2748..83e995fece88 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -83,6 +83,8 @@ struct cache_disk_superblock { __le32 read_misses; __le32 write_hits; __le32 write_misses; + + __le32 policy_version[CACHE_POLICY_VERSION_SIZE]; } __packed; struct dm_cache_metadata { @@ -109,6 +111,7 @@ struct dm_cache_metadata { bool clean_when_opened:1; char policy_name[CACHE_POLICY_NAME_SIZE]; + unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; size_t policy_hint_size; struct dm_cache_statistics stats; }; @@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); disk_super->version = cpu_to_le32(CACHE_VERSION); - memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE); + memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); + memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); disk_super->policy_hint_size = 0; r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, @@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); disk_super->cache_blocks = cpu_to_le32(0); - memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); disk_super->read_hits = cpu_to_le32(0); disk_super->read_misses = cpu_to_le32(0); @@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd, cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); + cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]); + cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]); + cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]); cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); @@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); + disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); + disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); + disk_super->policy_version[2] = 
cpu_to_le32(cmd->policy_version[2]); disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); @@ -854,18 +863,43 @@ struct thunk { bool hints_valid; }; +static bool policy_unchanged(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy) +{ + const char *policy_name = dm_cache_policy_get_name(policy); + const unsigned *policy_version = dm_cache_policy_get_version(policy); + size_t policy_hint_size = dm_cache_policy_get_hint_size(policy); + + /* + * Ensure policy names match. + */ + if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name))) + return false; + + /* + * Ensure policy major versions match. + */ + if (cmd->policy_version[0] != policy_version[0]) + return false; + + /* + * Ensure policy hint sizes match. + */ + if (cmd->policy_hint_size != policy_hint_size) + return false; + + return true; +} + static bool hints_array_initialized(struct dm_cache_metadata *cmd) { return cmd->hint_root && cmd->policy_hint_size; } static bool hints_array_available(struct dm_cache_metadata *cmd, - const char *policy_name) + struct dm_cache_policy *policy) { - bool policy_names_match = !strncmp(cmd->policy_name, policy_name, - sizeof(cmd->policy_name)); - - return cmd->clean_when_opened && policy_names_match && + return cmd->clean_when_opened && policy_unchanged(cmd, policy) && hints_array_initialized(cmd); } @@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf) return r; } -static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, +static int __load_mappings(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context) { struct thunk thunk; @@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam thunk.cmd = cmd; thunk.respect_dirty_flags = cmd->clean_when_opened; - thunk.hints_valid = hints_array_available(cmd, policy_name); + thunk.hints_valid = hints_array_available(cmd, policy); return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); } -int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, +int dm_cache_load_mappings(struct dm_cache_metadata *cmd, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context) { int r; down_read(&cmd->root_lock); - r = __load_mappings(cmd, policy_name, fn, context); + r = __load_mappings(cmd, policy, fn, context); up_read(&cmd->root_lock); return r; @@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty /* nothing to be done */ return 0; - value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0)); + value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? 
M_DIRTY : 0)); __dm_bless_for_disk(&value); r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), @@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po __le32 value; size_t hint_size; const char *policy_name = dm_cache_policy_get_name(policy); + const unsigned *policy_version = dm_cache_policy_get_version(policy); if (!policy_name[0] || (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) return -EINVAL; - if (strcmp(cmd->policy_name, policy_name)) { + if (!policy_unchanged(cmd, policy)) { strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); + memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version)); hint_size = dm_cache_policy_get_hint_size(policy); if (!hint_size) diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index 135864ea0eee..f45cef21f3d0 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock, dm_cblock_t cblock, bool dirty, uint32_t hint, bool hint_valid); int dm_cache_load_mappings(struct dm_cache_metadata *cmd, - const char *policy_name, + struct dm_cache_policy *policy, load_mapping_fn fn, void *context); diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index cc05d70b3cb8..b04d1f904d07 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c @@ -17,7 +17,6 @@ /*----------------------------------------------------------------*/ #define DM_MSG_PREFIX "cache cleaner" -#define CLEANER_VERSION "1.0.0" /* Cache entry struct. */ struct wb_cache_entry { @@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", + .version = {1, 0, 0}, .hint_size = 0, .owner = THIS_MODULE, .create = wb_create @@ -446,7 +446,10 @@ static int __init wb_init(void) if (r < 0) DMERR("register failed %d", r); else - DMINFO("version " CLEANER_VERSION " loaded"); + DMINFO("version %u.%u.%u loaded", + wb_policy_type.version[0], + wb_policy_type.version[1], + wb_policy_type.version[2]); return r; } diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h index 52a75beeced5..0928abdc49f0 100644 --- a/drivers/md/dm-cache-policy-internal.h +++ b/drivers/md/dm-cache-policy-internal.h @@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p); */ const char *dm_cache_policy_get_name(struct dm_cache_policy *p); +const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p); + size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 964153255076..dc112a7137fe 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -14,7 +14,6 @@ #include <linux/vmalloc.h> #define DM_MSG_PREFIX "cache-policy-mq" -#define MQ_VERSION "1.0.0" static struct kmem_cache *mq_entry_cache; @@ -1133,6 +1132,7 @@ bad_cache_alloc: static struct dm_cache_policy_type mq_policy_type = { .name = "mq", + .version = {1, 0, 0}, .hint_size = 4, .owner = THIS_MODULE, .create = mq_create @@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = { static struct dm_cache_policy_type default_policy_type = { .name = "default", + .version = {1, 0, 0}, .hint_size = 4, .owner = THIS_MODULE, .create = 
mq_create @@ -1164,7 +1165,10 @@ static int __init mq_init(void) r = dm_cache_policy_register(&default_policy_type); if (!r) { - DMINFO("version " MQ_VERSION " loaded"); + DMINFO("version %u.%u.%u loaded", + mq_policy_type.version[0], + mq_policy_type.version[1], + mq_policy_type.version[2]); return 0; } diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c index 2cbf5fdaac52..21c03c570c06 100644 --- a/drivers/md/dm-cache-policy.c +++ b/drivers/md/dm-cache-policy.c @@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p) } EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); +const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p) +{ + struct dm_cache_policy_type *t = p->private; + + return t->version; +} +EXPORT_SYMBOL_GPL(dm_cache_policy_get_version); + size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) { struct dm_cache_policy_type *t = p->private; diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index f0f51b260544..558bdfdabf5f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -196,6 +196,7 @@ struct dm_cache_policy { * We maintain a little register of the different policy types. */ #define CACHE_POLICY_NAME_SIZE 16 +#define CACHE_POLICY_VERSION_SIZE 3 struct dm_cache_policy_type { /* For use by the register code only. */ @@ -206,6 +207,7 @@ struct dm_cache_policy_type { * what gets passed on the target line to select your policy. */ char name[CACHE_POLICY_NAME_SIZE]; + unsigned version[CACHE_POLICY_VERSION_SIZE]; /* * Policies may store a hint for each each cache block. diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0f4e84b15c30..10744091e6ca 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -6,6 +6,7 @@ #include "dm.h" #include "dm-bio-prison.h" +#include "dm-bio-record.h" #include "dm-cache-metadata.h" #include <linux/dm-io.h> @@ -142,6 +143,7 @@ struct cache { spinlock_t lock; struct bio_list deferred_bios; struct bio_list deferred_flush_bios; + struct bio_list deferred_writethrough_bios; struct list_head quiesced_migrations; struct list_head completed_migrations; struct list_head need_commit_migrations; @@ -158,7 +160,7 @@ struct cache { /* * origin_blocks entries, discarded if set. */ - sector_t discard_block_size; /* a power of 2 times sectors per block */ + uint32_t discard_block_size; /* a power of 2 times sectors per block */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; @@ -199,6 +201,16 @@ struct per_bio_data { bool tick:1; unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; + + /* + * writethrough fields. These MUST remain at the end of this + * structure and the 'cache' member must be the first as it + * is used to determine the offsetof the writethrough fields. 
+ */ + struct cache *cache; + dm_cblock_t cblock; + bio_end_io_t *saved_bi_end_io; + struct dm_bio_details bio_details; }; struct dm_cache_migration { @@ -412,17 +424,24 @@ static bool block_size_is_power_of_two(struct cache *cache) return cache->sectors_per_block_shift >= 0; } +static dm_block_t block_div(dm_block_t b, uint32_t n) +{ + do_div(b, n); + + return b; +} + static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) { - sector_t discard_blocks = cache->discard_block_size; + uint32_t discard_blocks = cache->discard_block_size; dm_block_t b = from_oblock(oblock); if (!block_size_is_power_of_two(cache)) - (void) sector_div(discard_blocks, cache->sectors_per_block); + discard_blocks = discard_blocks / cache->sectors_per_block; else discard_blocks >>= cache->sectors_per_block_shift; - (void) sector_div(b, discard_blocks); + b = block_div(b, discard_blocks); return to_dblock(b); } @@ -500,16 +519,28 @@ static void save_stats(struct cache *cache) /*---------------------------------------------------------------- * Per bio data *--------------------------------------------------------------*/ -static struct per_bio_data *get_per_bio_data(struct bio *bio) + +/* + * If using writeback, leave out struct per_bio_data's writethrough fields. + */ +#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) +#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) + +static size_t get_per_bio_data_size(struct cache *cache) { - struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); + return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; +} + +static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) +{ + struct per_bio_data *pb = dm_per_bio_data(bio, data_size); BUG_ON(!pb); return pb; } -static struct per_bio_data *init_per_bio_data(struct bio *bio) +static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) { - struct per_bio_data *pb = get_per_bio_data(bio); + struct per_bio_data *pb = get_per_bio_data(bio, data_size); pb->tick = false; pb->req_nr = dm_bio_get_target_bio_nr(bio); @@ -543,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { unsigned long flags; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && @@ -609,6 +641,58 @@ static void issue(struct cache *cache, struct bio *bio) spin_unlock_irqrestore(&cache->lock, flags); } +static void defer_writethrough_bio(struct cache *cache, struct bio *bio) +{ + unsigned long flags; + + spin_lock_irqsave(&cache->lock, flags); + bio_list_add(&cache->deferred_writethrough_bios, bio); + spin_unlock_irqrestore(&cache->lock, flags); + + wake_worker(cache); +} + +static void writethrough_endio(struct bio *bio, int err) +{ + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); + bio->bi_end_io = pb->saved_bi_end_io; + + if (err) { + bio_endio(bio, err); + return; + } + + dm_bio_restore(&pb->bio_details, bio); + remap_to_cache(pb->cache, bio, pb->cblock); + + /* + * We can't issue this bio directly, since we're in interrupt + * context. So it get's put on a bio list for processing by the + * worker thread. 
+ */ + defer_writethrough_bio(pb->cache, bio); +} + +/* + * When running in writethrough mode we need to send writes to clean blocks + * to both the cache and origin devices. In future we'd like to clone the + * bio and send them in parallel, but for now we're doing them in + * series as this is easier. + */ +static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, + dm_oblock_t oblock, dm_cblock_t cblock) +{ + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); + + pb->cache = cache; + pb->cblock = cblock; + pb->saved_bi_end_io = bio->bi_end_io; + dm_bio_record(&pb->bio_details, bio); + bio->bi_end_io = writethrough_endio; + + remap_to_origin_clear_discard(pb->cache, bio, oblock); +} + /*---------------------------------------------------------------- * Migration processing * @@ -972,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) static void process_flush_bio(struct cache *cache, struct bio *bio) { - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); BUG_ON(bio->bi_size); if (!pb->req_nr) @@ -1002,7 +1087,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio) dm_block_t end_block = bio->bi_sector + bio_sectors(bio); dm_block_t b; - (void) sector_div(end_block, cache->discard_block_size); + end_block = block_div(end_block, cache->discard_block_size); for (b = start_block; b < end_block; b++) set_discard(cache, to_dblock(b)); @@ -1044,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, dm_oblock_t block = get_bio_block(cache, bio); struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; struct policy_result lookup_result; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); bool discarded_block = is_discarded_oblock(cache, block); bool can_migrate = discarded_block || spare_migration_bandwidth(cache); @@ -1070,14 +1156,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs, inc_hit_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (is_writethrough_io(cache, bio, lookup_result.cblock)) { - /* - * No need to mark anything dirty in write through mode. - */ - pb->req_nr == 0 ? - remap_to_cache(cache, bio, lookup_result.cblock) : - remap_to_origin_clear_discard(cache, bio, block); - } else + if (is_writethrough_io(cache, bio, lookup_result.cblock)) + remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); + else remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); issue(cache, bio); @@ -1086,17 +1167,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, case POLICY_MISS: inc_miss_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - - if (pb->req_nr != 0) { - /* - * This is a duplicate writethrough io that is no - * longer needed because the block has been demoted. - */ - bio_endio(bio, 0); - } else { - remap_to_origin_clear_discard(cache, bio, block); - issue(cache, bio); - } + remap_to_origin_clear_discard(cache, bio, block); + issue(cache, bio); break; case POLICY_NEW: @@ -1217,6 +1289,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) submit_bios ? 
generic_make_request(bio) : bio_io_error(bio); } +static void process_deferred_writethrough_bios(struct cache *cache) +{ + unsigned long flags; + struct bio_list bios; + struct bio *bio; + + bio_list_init(&bios); + + spin_lock_irqsave(&cache->lock, flags); + bio_list_merge(&bios, &cache->deferred_writethrough_bios); + bio_list_init(&cache->deferred_writethrough_bios); + spin_unlock_irqrestore(&cache->lock, flags); + + while ((bio = bio_list_pop(&bios))) + generic_make_request(bio); +} + static void writeback_some_dirty_blocks(struct cache *cache) { int r = 0; @@ -1313,6 +1402,7 @@ static int more_work(struct cache *cache) else return !bio_list_empty(&cache->deferred_bios) || !bio_list_empty(&cache->deferred_flush_bios) || + !bio_list_empty(&cache->deferred_writethrough_bios) || !list_empty(&cache->quiesced_migrations) || !list_empty(&cache->completed_migrations) || !list_empty(&cache->need_commit_migrations); @@ -1331,6 +1421,8 @@ static void do_worker(struct work_struct *ws) writeback_some_dirty_blocks(cache); + process_deferred_writethrough_bios(cache); + if (commit_if_needed(cache)) { process_deferred_flush_bios(cache, false); @@ -1756,8 +1848,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, } r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) + if (r) { + *error = "Error setting cache policy's config values"; dm_cache_policy_destroy(cache->policy); + cache->policy = NULL; + } return r; } @@ -1793,8 +1888,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) -static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio); - static int cache_create(struct cache_args *ca, struct cache **result) { int r = 0; @@ -1811,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->ti = ca->ti; ti->private = cache; - ti->per_bio_data_size = sizeof(struct per_bio_data); ti->num_flush_bios = 2; ti->flush_supported = true; @@ -1820,9 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discard_zeroes_data_unsupported = true; memcpy(&cache->features, &ca->features, sizeof(cache->features)); - - if (cache->features.write_through) - ti->num_write_bios = cache_num_write_bios; + ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; dm_table_add_target_callbacks(ti->table, &cache->callbacks); @@ -1835,7 +1925,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) /* FIXME: factor out this whole section */ origin_blocks = cache->origin_sectors = ca->origin_sectors; - (void) sector_div(origin_blocks, ca->block_size); + origin_blocks = block_div(origin_blocks, ca->block_size); cache->origin_blocks = to_oblock(origin_blocks); cache->sectors_per_block = ca->block_size; @@ -1848,7 +1938,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) dm_block_t cache_size = ca->cache_sectors; cache->sectors_per_block_shift = -1; - (void) sector_div(cache_size, ca->block_size); + cache_size = block_div(cache_size, ca->block_size); cache->cache_size = to_cblock(cache_size); } else { cache->sectors_per_block_shift = __ffs(ca->block_size); @@ -1873,6 +1963,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) spin_lock_init(&cache->lock); bio_list_init(&cache->deferred_bios); bio_list_init(&cache->deferred_flush_bios); + bio_list_init(&cache->deferred_writethrough_bios); 
INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); @@ -2002,6 +2093,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; r = cache_create(ca, &cache); + if (r) + goto out; r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); if (r) { @@ -2016,26 +2109,13 @@ out: return r; } -static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio) -{ - int r; - struct cache *cache = ti->private; - dm_oblock_t block = get_bio_block(cache, bio); - dm_cblock_t cblock; - - r = policy_lookup(cache->policy, block, &cblock); - if (r < 0) - return 2; /* assume the worst */ - - return (!r && !is_dirty(cache, cblock)) ? 2 : 1; -} - static int cache_map(struct dm_target *ti, struct bio *bio) { struct cache *cache = ti->private; int r; dm_oblock_t block = get_bio_block(cache, bio); + size_t pb_data_size = get_per_bio_data_size(cache); bool can_migrate = false; bool discarded_block; struct dm_bio_prison_cell *cell; @@ -2052,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio); + pb = init_per_bio_data(bio, pb_data_size); if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); @@ -2097,18 +2177,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio) inc_hit_counter(cache, bio); pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (is_writethrough_io(cache, bio, lookup_result.cblock)) { - /* - * No need to mark anything dirty in write through mode. - */ - pb->req_nr == 0 ? - remap_to_cache(cache, bio, lookup_result.cblock) : - remap_to_origin_clear_discard(cache, bio, block); - cell_defer(cache, cell, false); - } else { + if (is_writethrough_io(cache, bio, lookup_result.cblock)) + remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); + else remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); - cell_defer(cache, cell, false); - } + + cell_defer(cache, cell, false); break; case POLICY_MISS: @@ -2143,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) { struct cache *cache = ti->private; unsigned long flags; - struct per_bio_data *pb = get_per_bio_data(bio); + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); if (pb->tick) { policy_tick(cache->policy); @@ -2319,8 +2394,7 @@ static int cache_preresume(struct dm_target *ti) } if (!cache->loaded_mappings) { - r = dm_cache_load_mappings(cache->cmd, - dm_cache_policy_get_name(cache->policy), + r = dm_cache_load_mappings(cache->cmd, cache->policy, load_mapping, cache); if (r) { DMERR("could not load cache mappings"); @@ -2535,7 +2609,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 009339d62828..004ad1652b73 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt) return q && blk_queue_discard(q); } +static bool is_factor(sector_t block_size, uint32_t n) +{ + return !sector_div(block_size, n); +} + /* * If discard_passdown was enabled verify that the data device * supports discards. Disable discard_passdown if not. 
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt) else if (data_limits->discard_granularity > block_size) reason = "discard granularity larger than a block"; - else if (block_size & (data_limits->discard_granularity - 1)) + else if (!is_factor(block_size, data_limits->discard_granularity)) reason = "discard granularity not a factor of block size"; if (reason) { @@ -2544,7 +2549,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 6, 1}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 7, 1}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 6ad538375c3c..a746f1d21c66 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -93,6 +93,13 @@ struct dm_verity_io { */ }; +struct dm_verity_prefetch_work { + struct work_struct work; + struct dm_verity *v; + sector_t block; + unsigned n_blocks; +}; + static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) { return (struct shash_desc *)(io + 1); @@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error) * The root buffer is not prefetched, it is assumed that it will be cached * all the time. */ -static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) +static void verity_prefetch_io(struct work_struct *work) { + struct dm_verity_prefetch_work *pw = + container_of(work, struct dm_verity_prefetch_work, work); + struct dm_verity *v = pw->v; int i; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; sector_t hash_block_end; - verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); - verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); + verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); + verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); if (!i) { unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); @@ -452,6 +462,25 @@ no_prefetch_cluster: dm_bufio_prefetch(v->bufio, hash_block_start, hash_block_end - hash_block_start + 1); } + + kfree(pw); +} + +static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) +{ + struct dm_verity_prefetch_work *pw; + + pw = kmalloc(sizeof(struct dm_verity_prefetch_work), + GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); + + if (!pw) + return; + + INIT_WORK(&pw->work, verity_prefetch_io); + pw->v = v; + pw->block = io->block; + pw->n_blocks = io->n_blocks; + queue_work(v->verify_wq, &pw->work); } /* @@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) memcpy(io->io_vec, bio_iovec(bio), io->io_vec_size * sizeof(struct bio_vec)); - verity_prefetch_io(v, io); + verity_submit_prefetch(v, io); generic_make_request(bio); @@ -858,7 +887,7 @@ bad: static struct target_type verity_target = { .name = "verity", - .version = {1, 1, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7e469260fe5e..9a0bdad9ad8f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error) queue_io(md, bio); } else { /* done with normal IO or 
empty flush */ + trace_block_bio_complete(md->queue, bio, io_error); bio_endio(bio, io_error); } } diff --git a/drivers/md/md.c b/drivers/md/md.c index fcb878f88796..aeceedfc530b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev) removed++; } } - if (removed) - sysfs_notify(&mddev->kobj, NULL, - "degraded"); - + if (removed && mddev->kobj.sd) + sysfs_notify(&mddev->kobj, NULL, "degraded"); rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && diff --git a/drivers/md/md.h b/drivers/md/md.h index eca59c3074ef..d90fb1a879e1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev) static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags)) { + if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); } else @@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; - if (!test_bit(Replacement, &rdev->flags)) { + if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); sysfs_remove_link(&mddev->kobj, nm); } diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index c4f28133ef82..b88757cd0d1d 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -139,15 +139,8 @@ struct child { struct btree_node *n; }; -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - -static int init_child(struct dm_btree_info *info, struct btree_node *parent, +static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt, + struct btree_node *parent, unsigned index, struct child *result) { int r, inc; @@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent, result->n = dm_block_data(result->block); if (inc) - inc_children(info->tm, result->n, &le64_type); + inc_children(info->tm, result->n, vt); *((__le64 *) value_ptr(parent, index)) = cpu_to_le64(dm_block_location(result->block)); @@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, } static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, - unsigned left_index) + struct dm_btree_value_type *vt, unsigned left_index) { int r; struct btree_node *parent; @@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, parent = dm_block_data(shadow_current(s)); - r = init_child(info, parent, left_index, &left); + r = init_child(info, vt, parent, left_index, &left); if (r) return r; - r = init_child(info, parent, left_index + 1, &right); + r = init_child(info, vt, parent, left_index + 1, &right); if (r) { exit_child(info, &left); return r; @@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, } static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, - unsigned left_index) + struct dm_btree_value_type *vt, unsigned left_index) { int r; struct btree_node *parent = dm_block_data(shadow_current(s)); @@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct 
dm_btree_info *info, /* * FIXME: fill out an array? */ - r = init_child(info, parent, left_index, &left); + r = init_child(info, vt, parent, left_index, &left); if (r) return r; - r = init_child(info, parent, left_index + 1, &center); + r = init_child(info, vt, parent, left_index + 1, &center); if (r) { exit_child(info, &left); return r; } - r = init_child(info, parent, left_index + 2, &right); + r = init_child(info, vt, parent, left_index + 2, &right); if (r) { exit_child(info, &left); exit_child(info, &center); @@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm, } static int rebalance_children(struct shadow_spine *s, - struct dm_btree_info *info, uint64_t key) + struct dm_btree_info *info, + struct dm_btree_value_type *vt, uint64_t key) { int i, r, has_left_sibling, has_right_sibling; uint32_t child_entries; @@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s, has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); if (!has_left_sibling) - r = rebalance2(s, info, i); + r = rebalance2(s, info, vt, i); else if (!has_right_sibling) - r = rebalance2(s, info, i - 1); + r = rebalance2(s, info, vt, i - 1); else - r = rebalance3(s, info, i - 1); + r = rebalance3(s, info, vt, i - 1); return r; } @@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, if (le32_to_cpu(n->header.flags) & LEAF_NODE) return do_leaf(n, key, index); - r = rebalance_children(s, info, key); + r = rebalance_children(s, info, vt, key); if (r) break; @@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } +static struct dm_btree_value_type le64_type = { + .context = NULL, + .size = sizeof(__le64), + .inc = NULL, + .dec = NULL, + .equal = NULL +}; + int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3ee2912889e7..f4e87bfc7567 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi) return_bi = bi->bi_next; bi->bi_next = NULL; bi->bi_size = 0; + trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), + bi, 0); bio_endio(bi, 0); bi = return_bi; } @@ -671,9 +673,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_next = NULL; if (rrdev) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), - bi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), + bi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); generic_make_request(bi); } if (rrdev) { @@ -701,9 +705,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) rbi->bi_io_vec[0].bv_offset = 0; rbi->bi_size = STRIPE_SIZE; rbi->bi_next = NULL; - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), - rbi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), + rbi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); generic_make_request(rbi); } if (!rdev && !rrdev) { @@ -2280,17 +2285,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, int level = conf->level; if (rcw) { - /* if we are not expanding this is a proper write request, and - * there will be bios with new data to be drained into the - * stripe cache - */ - if (!expand) { - sh->reconstruct_state = 
reconstruct_state_drain_run; - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - } else - sh->reconstruct_state = reconstruct_state_run; - - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -2303,6 +2297,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, s->locked++; } } + /* if we are not expanding this is a proper write request, and + * there will be bios with new data to be drained into the + * stripe cache + */ + if (!expand) { + if (!s->locked) + /* False alarm, nothing to do */ + return; + sh->reconstruct_state = reconstruct_state_drain_run; + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + } else + sh->reconstruct_state = reconstruct_state_run; + + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); + if (s->locked + conf->max_degraded == disks) if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) atomic_inc(&conf->pending_full_writes); @@ -2311,11 +2320,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); - sh->reconstruct_state = reconstruct_state_prexor_drain_run; - set_bit(STRIPE_OP_PREXOR, &s->ops_request); - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); - for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (i == pd_idx) @@ -2330,6 +2334,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, s->locked++; } } + if (!s->locked) + /* False alarm - nothing to do */ + return; + sh->reconstruct_state = reconstruct_state_prexor_drain_run; + set_bit(STRIPE_OP_PREXOR, &s->ops_request); + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); } /* keep the parity disk(s) locked while asynchronous operations @@ -2564,6 +2575,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, int i; clear_bit(STRIPE_SYNCING, &sh->state); + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) + wake_up(&conf->wait_for_overlap); s->syncing = 0; s->replacing = 0; /* There is nothing more to do for sync/check/repair. 
@@ -2737,6 +2750,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, { int i; struct r5dev *dev; + int discard_pending = 0; for (i = disks; i--; ) if (sh->dev[i].written) { @@ -2765,9 +2779,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), 0); - } - } else if (test_bit(R5_Discard, &sh->dev[i].flags)) - clear_bit(R5_Discard, &sh->dev[i].flags); + } else if (test_bit(R5_Discard, &dev->flags)) + discard_pending = 1; + } + if (!discard_pending && + test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { + clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); + if (sh->qd_idx >= 0) { + clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); + clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); + } + /* now that discard is done we can proceed with any sync */ + clear_bit(STRIPE_DISCARD, &sh->state); + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) + set_bit(STRIPE_HANDLE, &sh->state); + + } if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -2826,8 +2854,10 @@ static void handle_stripe_dirtying(struct r5conf *conf, set_bit(STRIPE_HANDLE, &sh->state); if (rmw < rcw && rmw > 0) { /* prefer read-modify-write, but need to get some data */ - blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d", - (unsigned long long)sh->sector, rmw); + if (conf->mddev->queue) + blk_add_trace_msg(conf->mddev->queue, + "raid5 rmw %llu %d", + (unsigned long long)sh->sector, rmw); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if ((dev->towrite || i == sh->pd_idx) && @@ -2877,7 +2907,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, } } } - if (rcw) + if (rcw && conf->mddev->queue) blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", (unsigned long long)sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); @@ -3417,9 +3447,15 @@ static void handle_stripe(struct stripe_head *sh) return; } - if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); + if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + spin_lock(&sh->stripe_lock); + /* Cannot process 'sync' concurrently with 'discard' */ + if (!test_bit(STRIPE_DISCARD, &sh->state) && + test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); + } + spin_unlock(&sh->stripe_lock); } clear_bit(STRIPE_DELAYED, &sh->state); @@ -3579,6 +3615,8 @@ static void handle_stripe(struct stripe_head *sh) test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS, 1); clear_bit(STRIPE_SYNCING, &sh->state); + if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) + wake_up(&conf->wait_for_overlap); } /* If the failed drives are just a ReadError, then we might need @@ -3878,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error) rdev_dec_pending(rdev, conf->mddev); if (!error && uptodate) { + trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), + raid_bi, 0); bio_endio(raid_bi, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_stripe); @@ -3982,9 +4022,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) atomic_inc(&conf->active_aligned_reads); spin_unlock_irq(&conf->device_lock); - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), - align_bi, disk_devt(mddev->gendisk), - 
raid_bio->bi_sector); + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), + align_bi, disk_devt(mddev->gendisk), + raid_bio->bi_sector); generic_make_request(align_bi); return 1; } else { @@ -4078,7 +4119,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) } spin_unlock_irq(&conf->device_lock); } - trace_block_unplug(mddev->queue, cnt, !from_schedule); + if (mddev->queue) + trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); } @@ -4141,6 +4183,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) sh = get_active_stripe(conf, logical_sector, 0, 0, 0); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); + set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); + if (test_bit(STRIPE_SYNCING, &sh->state)) { + release_stripe(sh); + schedule(); + goto again; + } + clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); spin_lock_irq(&sh->stripe_lock); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -4153,6 +4202,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) goto again; } } + set_bit(STRIPE_DISCARD, &sh->state); finish_wait(&conf->wait_for_overlap, &w); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -4336,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi) if ( rw == WRITE ) md_write_end(mddev); + trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), + bi, 0); bio_endio(bi, 0); } } @@ -4712,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) handled++; } remaining = raid5_dec_bi_active_stripes(raid_bio); - if (remaining == 0) + if (remaining == 0) { + trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), + raid_bio, 0); bio_endio(raid_bio, 0); + } if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_stripe); return handled; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 18b2c4a8a1fd..b0b663b119a8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -221,10 +221,6 @@ struct stripe_head { struct stripe_operations { int target, target2; enum sum_check_flags zero_sum_result; - #ifdef CONFIG_MULTICORE_RAID456 - unsigned long request; - wait_queue_head_t wait_for_ops; - #endif } ops; struct r5dev { /* rreq and rvec are used for the replacement device when @@ -323,6 +319,7 @@ enum { STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, STRIPE_ON_UNPLUG_LIST, + STRIPE_DISCARD, }; /* diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index f19cd7367040..4faaf8053f26 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c @@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer, __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, rate, rate); - state->estimated_rate[i] = rate; + state->estimated_rate[layer] = rate; } diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index d4e7567b367c..0b899cb6cda1 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable) if (enable) { if (is_code(code, M5MOLS_RESTYPE_MONITOR)) ret = m5mols_start_monitor(info); - if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) + else if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) ret = m5mols_start_capture(info); else ret = -EINVAL; diff --git 
a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index ccd18e4ee789..54579e4c740b 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] = vdelay start of active video in 2 * field lines relative to trailing edge of /VRESET pulse (VDELAY register). sheight height of active video in 2 * field lines. + extraheight Added to sheight for cropcap.bounds.height only videostart0 ITU-R frame line number of the line corresponding to vdelay in the first field. */ #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \ - vdelay, sheight, videostart0) \ + vdelay, sheight, extraheight, videostart0) \ .cropcap.bounds.left = minhdelayx1, \ /* * 2 because vertically we count field lines times two, */ \ /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \ .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \ /* 4 is a safety margin at the end of the line. */ \ .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \ - .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \ + .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \ + MIN_VDELAY, \ .cropcap.defrect.left = hdelayx1, \ .cropcap.defrect.top = (videostart0) * 2, \ .cropcap.defrect.width = swidth, \ @@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* totalwidth */ 1135, /* sqwidth */ 944, /* vdelay */ 0x20, - /* bt878 (and bt848?) can capture another - line below active video. */ - /* sheight */ (576 + 2) + 0x20 - 2, + /* sheight */ 576, + /* bt878 (and bt848?) can capture another + line below active video. */ + /* extraheight */ 2, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, @@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 480, + /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_SECAM, @@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x20, /* sheight */ 576, + /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_Nc, @@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 576, + /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_M, @@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 480, + /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_N, @@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x20, /* sheight */ 576, + /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_NTSC_M_JP, @@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x16, /* sheight */ 480, + /* extraheight */ 0, /* videostart0 */ 23) },{ /* that one hopefully works with the strange timing @@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x1a, /* sheight */ 480, + /* extraheight */ 0, /* videostart0 */ 23) } }; diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index d4de021dc844..31ce7698acb9 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c @@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev) spin_lock_init(&dev->slock); - for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) { + for (i = 0; i < 
VID_CHANNEL_NUM; ++i) { cx25821_init_controls(dev, i); cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b6333461..a0639e779973 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC config VIDEO_SH_VEU tristate "SuperH VEU mem2mem video processing driver" - depends on VIDEO_DEV && VIDEO_V4L2 + depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV help diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index 82d9f6ac12f3..33b5ffc8d66d 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc) static int gsc_m2m_resume(struct gsc_dev *gsc) { + struct gsc_ctx *ctx; unsigned long flags; spin_lock_irqsave(&gsc->slock, flags); /* Clear for full H/W setup in first run after resume */ + ctx = gsc->m2m.ctx; gsc->m2m.ctx = NULL; spin_unlock_irqrestore(&gsc->slock, flags); if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state)) - gsc_m2m_job_finish(gsc->m2m.ctx, - VB2_BUF_STATE_ERROR); + gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + return 0; } @@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev) /* Do not resume if the device was idle before system suspend */ spin_lock_irqsave(&gsc->slock, flags); if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) || - !gsc_m2m_active(gsc)) { + !gsc_m2m_opened(gsc)) { spin_unlock_irqrestore(&gsc->slock, flags); return 0; } diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c index e3916bde45cf..0f513dd19f86 100644 --- a/drivers/media/platform/s5p-fimc/fimc-core.c +++ b/drivers/media/platform/s5p-fimc/fimc-core.c @@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc) static int fimc_m2m_resume(struct fimc_dev *fimc) { + struct fimc_ctx *ctx; unsigned long flags; spin_lock_irqsave(&fimc->slock, flags); /* Clear for full H/W setup in first run after resume */ + ctx = fimc->m2m.ctx; fimc->m2m.ctx = NULL; spin_unlock_irqrestore(&fimc->slock, flags); if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state)) - fimc_m2m_job_finish(fimc->m2m.ctx, - VB2_BUF_STATE_ERROR); + fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); + return 0; } diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c index f0af0754a7b4..ac9663ce2a49 100644 --- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c +++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c @@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = { void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) { enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code; - unsigned int i = ARRAY_SIZE(src_pixfmt_map); + int i = ARRAY_SIZE(src_pixfmt_map); u32 cfg; - while (i-- >= 0) { + while (--i >= 0) { if (src_pixfmt_map[i][0] == pixelcode) break; } @@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f) { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, }; u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); - unsigned int i = ARRAY_SIZE(pixcode); + int i = ARRAY_SIZE(pixcode); - while (i-- >= 0) + while (--i >= 0) if (pixcode[i][0] == dev->fmt->mbus_code) break; cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; diff --git 
a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c index bfc4206935c8..bbc35de7db27 100644 --- a/drivers/media/platform/s5p-fimc/fimc-lite.c +++ b/drivers/media/platform/s5p-fimc/fimc-lite.c @@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = { .id = V4L2_CTRL_CLASS_USER | 0x1001, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Test Pattern 640x480", + .step = 1, }; static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c index a17fcb2d5d41..cd38d708ab58 100644 --- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c @@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source, struct fimc_pipeline *pipeline; struct v4l2_subdev *sd; struct mutex *lock; - int ret = 0; + int i, ret = 0; int ref_count; if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) @@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source, return 0; } + mutex_lock(lock); + ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count; + if (!(flags & MEDIA_LNK_FL_ENABLED)) { - int i; - mutex_lock(lock); - ret = __fimc_pipeline_close(pipeline); + if (ref_count > 0) { + ret = __fimc_pipeline_close(pipeline); + if (!ret && fimc) + fimc_ctrls_delete(fimc->vid_cap.ctx); + } for (i = 0; i < IDX_MAX; i++) pipeline->subdevs[i] = NULL; - if (fimc) - fimc_ctrls_delete(fimc->vid_cap.ctx); - mutex_unlock(lock); - return ret; + } else if (ref_count > 0) { + /* + * Link activation. Enable power of pipeline elements only if + * the pipeline is already in use, i.e. its video node is open. + * Recreate the controls destroyed during the link deactivation. + */ + ret = __fimc_pipeline_open(pipeline, + source->entity, true); + if (!ret && fimc) + ret = fimc_capture_ctrls_create(fimc); } - /* - * Link activation. Enable power of pipeline elements only if the - * pipeline is already in use, i.e. its video node is opened. - * Recreate the controls destroyed during the link deactivation. - */ - mutex_lock(lock); - - ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count; - if (ref_count > 0) - ret = __fimc_pipeline_open(pipeline, source->entity, true); - if (!ret && fimc) - ret = fimc_capture_ctrls_create(fimc); mutex_unlock(lock); return ret ? 
-EPIPE : ret; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index e84703c314ce..1cb6d57987c6 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err) unsigned int frame_type; dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); - frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); + frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx); /* If frame is same as previous then skip and do not dequeue */ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2356fd52a169..4f6b553c4b2d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -232,6 +232,7 @@ static struct mfc_control controls[] = { .minimum = 0, .maximum = 1, .default_value = 0, + .step = 1, .menu_skip_mask = 0, }, { diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029ad..348dafc0318a 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c @@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev) static int usb_ma901radio_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_device *dev = interface_to_usbdev(intf); struct ma901radio_device *radio; int retval = 0; + /* Masterkit MA901 usb radio has the same USB ID as many others + * Atmel V-USB devices. Let's make additional checks to be sure + * that this is our device. + */ + + if (dev->product && dev->manufacturer && + (strncmp(dev->product, "MA901", 5) != 0 + || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) + return -ENODEV; + radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); if (!radio) { dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 19f3563c61da..5a79c333d45e 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -291,7 +291,7 @@ config IR_TTUSBIR config IR_RX51 tristate "Nokia N900 IR transmitter diode" - depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM + depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM ---help--- Say Y or M here if you want to enable support for the IR transmitter diode built in the Nokia N900 (RX51) device. 
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index a9d355230e8e..768aaf62d5dc 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y) videodev-objs += v4l2-compat-ioctl32.o endif -obj-$(CONFIG_VIDEO_DEV) += videodev.o +obj-$(CONFIG_VIDEO_V4L2) += videodev.o obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 671f5b171c73..c346941a2515 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -858,6 +858,7 @@ config EZX_PCAP config AB8500_CORE bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU + select POWER_SUPPLY select MFD_CORE select IRQ_DOMAIN help diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c index b1f3561b023f..5f341a50ee5a 100644 --- a/drivers/mfd/ab8500-gpadc.c +++ b/drivers/mfd/ab8500-gpadc.c @@ -594,9 +594,12 @@ static int ab8500_gpadc_runtime_suspend(struct device *dev) static int ab8500_gpadc_runtime_resume(struct device *dev) { struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); + int ret; - regulator_enable(gpadc->regu); - return 0; + ret = regulator_enable(gpadc->regu); + if (ret) + dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret); + return ret; } static int ab8500_gpadc_runtime_idle(struct device *dev) @@ -643,7 +646,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev) } /* VTVout LDO used to power up ab8500-GPADC */ - gpadc->regu = regulator_get(&pdev->dev, "vddadc"); + gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc"); if (IS_ERR(gpadc->regu)) { ret = PTR_ERR(gpadc->regu); dev_err(gpadc->dev, "failed to get vtvout LDO\n"); @@ -652,7 +655,11 @@ static int ab8500_gpadc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gpadc); - regulator_enable(gpadc->regu); + ret = regulator_enable(gpadc->regu); + if (ret) { + dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret); + goto fail_enable; + } pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY); pm_runtime_use_autosuspend(gpadc->dev); @@ -663,6 +670,8 @@ static int ab8500_gpadc_probe(struct platform_device *pdev) list_add_tail(&gpadc->node, &ab8500_gpadc_list); dev_dbg(gpadc->dev, "probe success\n"); return 0; + +fail_enable: fail_irq: free_irq(gpadc->irq, gpadc); fail: diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 6b5edf64de2b..4febc5c7fdee 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -460,15 +460,15 @@ static void omap_usbhs_init(struct device *dev) switch (omap->usbhs_rev) { case OMAP_USBHS_REV1: - omap_usbhs_rev1_hostconfig(omap, reg); + reg = omap_usbhs_rev1_hostconfig(omap, reg); break; case OMAP_USBHS_REV2: - omap_usbhs_rev2_hostconfig(omap, reg); + reg = omap_usbhs_rev2_hostconfig(omap, reg); break; default: /* newer revisions */ - omap_usbhs_rev2_hostconfig(omap, reg); + reg = omap_usbhs_rev2_hostconfig(omap, reg); break; } diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c index bbdbc50a3cca..73bf76df1044 100644 --- a/drivers/mfd/palmas.c +++ b/drivers/mfd/palmas.c @@ -257,9 +257,24 @@ static struct regmap_irq_chip palmas_irq_chip = { PALMAS_INT1_MASK), }; -static void palmas_dt_to_pdata(struct device_node *node, +static int palmas_set_pdata_irq_flag(struct i2c_client *i2c, struct palmas_platform_data *pdata) { + struct irq_data *irq_data = irq_get_irq_data(i2c->irq); + if 
(!irq_data) { + dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq); + return -EINVAL; + } + + pdata->irq_flags = irqd_get_trigger_type(irq_data); + dev_info(&i2c->dev, "Irq flag is 0x%08x\n", pdata->irq_flags); + return 0; +} + +static void palmas_dt_to_pdata(struct i2c_client *i2c, + struct palmas_platform_data *pdata) +{ + struct device_node *node = i2c->dev.of_node; int ret; u32 prop; @@ -283,6 +298,8 @@ static void palmas_dt_to_pdata(struct device_node *node, pdata->power_ctrl = PALMAS_POWER_CTRL_NSLEEP_MASK | PALMAS_POWER_CTRL_ENABLE1_MASK | PALMAS_POWER_CTRL_ENABLE2_MASK; + if (i2c->irq) + palmas_set_pdata_irq_flag(i2c, pdata); } static int palmas_i2c_probe(struct i2c_client *i2c, @@ -304,7 +321,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, if (!pdata) return -ENOMEM; - palmas_dt_to_pdata(node, pdata); + palmas_dt_to_pdata(i2c, pdata); } if (!pdata) @@ -344,6 +361,19 @@ static int palmas_i2c_probe(struct i2c_client *i2c, } } + /* Change interrupt line output polarity */ + if (pdata->irq_flags & IRQ_TYPE_LEVEL_HIGH) + reg = PALMAS_POLARITY_CTRL_INT_POLARITY; + else + reg = 0; + ret = palmas_update_bits(palmas, PALMAS_PU_PD_OD_BASE, + PALMAS_POLARITY_CTRL, PALMAS_POLARITY_CTRL_INT_POLARITY, + reg); + if (ret < 0) { + dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret); + goto err; + } + /* Change IRQ into clear on read mode for efficiency */ slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE); addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL); @@ -352,7 +382,7 @@ static int palmas_i2c_probe(struct i2c_client *i2c, regmap_write(palmas->regmap[slave], addr, reg); ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, 0, &palmas_irq_chip, + IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip, &palmas->irq_data); if (ret < 0) goto err; diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c index 4658b5bdcd84..aeb8e40ab424 100644 --- a/drivers/mfd/tps65912-core.c +++ b/drivers/mfd/tps65912-core.c @@ -169,6 +169,7 @@ err: void tps65912_device_exit(struct tps65912 *tps65912) { mfd_remove_devices(tps65912->dev); + tps65912_irq_exit(tps65912); kfree(tps65912); } diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index e16edca92670..d2ab222138c2 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(twl4030_audio_enable_resource); * Disable the resource. 
* The function returns with error or the content of the register */ -int twl4030_audio_disable_resource(unsigned id) +int twl4030_audio_disable_resource(enum twl4030_audio_res id) { struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev); int val; diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index 88ff9dc83305..942b666a2a07 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c @@ -800,7 +800,7 @@ static int twl4030_madc_remove(struct platform_device *pdev) static struct platform_driver twl4030_madc_driver = { .probe = twl4030_madc_probe, - .remove = __exit_p(twl4030_madc_remove), + .remove = twl4030_madc_remove, .driver = { .name = "twl4030_madc", .owner = THIS_MODULE, diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 45ea7185c003..642c6223fa6c 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -152,6 +152,20 @@ static void mei_me_intr_disable(struct mei_device *dev) } /** + * mei_me_hw_reset_release - release device from the reset + * + * @dev: the device structure + */ +static void mei_me_hw_reset_release(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + + hcsr |= H_IG; + hcsr &= ~H_RST; + mei_hcsr_set(hw, hcsr); +} +/** * mei_me_hw_reset - resets fw via mei csr register. * * @dev: the device structure @@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable) if (intr_enable) hcsr |= H_IE; else - hcsr &= ~H_IE; - - mei_hcsr_set(hw, hcsr); - - hcsr = mei_hcsr_read(hw) | H_IG; - hcsr &= ~H_RST; + hcsr |= ~H_IE; mei_hcsr_set(hw, hcsr); - hcsr = mei_hcsr_read(hw); + if (dev->dev_state == MEI_DEV_POWER_DOWN) + mei_me_hw_reset_release(dev); - dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr); + dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw)); } /** @@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } else { - dev_dbg(&dev->pdev->dev, "FW not ready.\n"); + dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); + mei_me_hw_reset_release(dev); mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 6ec530168afb..356179991a2e 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) mei_cl_all_write_clear(dev); } +void mei_stop(struct mei_device *dev) +{ + dev_dbg(&dev->pdev->dev, "stopping the device.\n"); + + mutex_lock(&dev->device_lock); + + cancel_delayed_work(&dev->timer_work); + + mei_wd_stop(dev); + + dev->dev_state = MEI_DEV_POWER_DOWN; + mei_reset(dev, 0); + + mutex_unlock(&dev->device_lock); + + flush_scheduled_work(); +} + diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index cb80166161f0..97873812e33b 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) void mei_device_init(struct mei_device *dev); void mei_reset(struct mei_device *dev, int interrupts); int mei_hw_init(struct mei_device *dev); +void mei_stop(struct mei_device *dev); /* * MEI interrupt functions prototype diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b40ec0601ab0..b8b5c9c3ad03 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev) hw = 
to_me_hw(dev); - mutex_lock(&dev->device_lock); - - cancel_delayed_work(&dev->timer_work); - mei_wd_stop(dev); + dev_err(&pdev->dev, "stop\n"); + mei_stop(dev); mei_pdev = NULL; - if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) { - dev->iamthif_cl.state = MEI_FILE_DISCONNECTING; - mei_cl_disconnect(&dev->iamthif_cl); - } - if (dev->wd_cl.state == MEI_FILE_CONNECTED) { - dev->wd_cl.state = MEI_FILE_DISCONNECTING; - mei_cl_disconnect(&dev->wd_cl); - } - - /* Unregistering watchdog device */ mei_watchdog_unregister(dev); - /* remove entry if already in list */ - dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n"); - - if (dev->open_handle_count > 0) - dev->open_handle_count--; - mei_cl_unlink(&dev->wd_cl); - - if (dev->open_handle_count > 0) - dev->open_handle_count--; - mei_cl_unlink(&dev->iamthif_cl); - - dev->iamthif_current_cb = NULL; - dev->me_clients_num = 0; - - mutex_unlock(&dev->device_lock); - - flush_scheduled_work(); - /* disable interrupts */ mei_disable_interrupts(dev); @@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev = pci_get_drvdata(pdev); - int err; if (!dev) return -ENODEV; - mutex_lock(&dev->device_lock); - cancel_delayed_work(&dev->timer_work); + dev_err(&pdev->dev, "suspend\n"); - /* Stop watchdog if exists */ - err = mei_wd_stop(dev); - /* Set new mei state */ - if (dev->dev_state == MEI_DEV_ENABLED || - dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { - dev->dev_state = MEI_DEV_POWER_DOWN; - mei_reset(dev, 0); - } - mutex_unlock(&dev->device_lock); + mei_stop(dev); + + mei_disable_interrupts(dev); free_irq(pdev->irq, dev); pci_disable_msi(pdev); - return err; + return 0; } static int mei_pci_resume(struct device *device) diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI + depends on X86 && PCI && NET help This is VMware's Virtual Machine Communication Interface. It enables high-speed communication between host and guest in a virtual diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c index ed5c433cd493..f3cdd904fe4d 100644 --- a/drivers/misc/vmw_vmci/vmci_datagram.c +++ b/drivers/misc/vmw_vmci/vmci_datagram.c @@ -42,9 +42,11 @@ struct datagram_entry { struct delayed_datagram_info { struct datagram_entry *entry; - struct vmci_datagram msg; struct work_struct work; bool in_dg_host_queue; + /* msg and msg_payload must be together. */ + struct vmci_datagram msg; + u8 msg_payload[]; }; /* Number of in-flight host->host datagrams */ diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 63feb75cc8e0..9279a9174f84 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -19,6 +19,12 @@ /* 10 parts were found on sflash on Netgear WNDR4500 */ #define BCM47XXPART_MAX_PARTS 12 +/* + * Amount of bytes we read when analyzing each block of flash memory. + * Set it big enough to allow detecting partition and reading important data. 
+ */ +#define BCM47XXPART_BYTES_TO_READ 0x404 + /* Magics */ #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ #define POT_MAGIC1 0x54544f50 /* POTT */ @@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_info *master, struct trx_header *trx; int trx_part = -1; int last_trx_part = -1; - int max_bytes_to_read = 0x8004; + int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; if (blocksize <= 0x10000) blocksize = 0x10000; - if (blocksize == 0x20000) - max_bytes_to_read = 0x18004; /* Alloc */ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, GFP_KERNEL); - buf = kzalloc(max_bytes_to_read, GFP_KERNEL); + buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); /* Parse block by block looking for magics */ for (offset = 0; offset <= master->size - blocksize; @@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read beginning of the block */ - if (mtd_read(master, offset, max_bytes_to_read, + if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, &bytes_read, (uint8_t *)buf) < 0) { pr_err("mtd_read error while parsing (offset: 0x%X)!\n", offset); @@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_info *master, continue; } - /* Standard NVRAM */ - if (buf[0x000 / 4] == NVRAM_HEADER || - buf[0x1000 / 4] == NVRAM_HEADER || - buf[0x8000 / 4] == NVRAM_HEADER || - (blocksize == 0x20000 && ( - buf[0x10000 / 4] == NVRAM_HEADER || - buf[0x11000 / 4] == NVRAM_HEADER || - buf[0x18000 / 4] == NVRAM_HEADER))) { - bcm47xxpart_add_part(&parts[curr_part++], "nvram", - offset, 0); - offset = rounddown(offset, blocksize); - continue; - } - /* * board_data starts with board_id which differs across boards, * but we can use 'MPFR' (hopefully) magic at 0x100 @@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_info *master, continue; } } + + /* Look for NVRAM at the end of the last block. */ + for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { + if (curr_part > BCM47XXPART_MAX_PARTS) { + pr_warn("Reached maximum number of partitions, scanning stopped!\n"); + break; + } + + offset = master->size - possible_nvram_sizes[i]; + if (mtd_read(master, offset, 0x4, &bytes_read, + (uint8_t *)buf) < 0) { + pr_err("mtd_read error while reading at offset 0x%X!\n", + offset); + continue; + } + + /* Standard NVRAM */ + if (buf[0] == NVRAM_HEADER) { + bcm47xxpart_add_part(&parts[curr_part++], "nvram", + master->size - blocksize, 0); + break; + } + } + kfree(buf); /* diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 92ab30ab00dc..dc571ebc1aa0 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, } #endif -static inline unsigned long get_vm_size(struct vm_area_struct *vma) -{ - return vma->vm_end - vma->vm_start; -} - -static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) -{ - return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; -} - -/* - * Set a new vm offset. - * - * Verify that the incoming offset really works as a page offset, - * and that the offset and size fit in a resource_size_t. 
- */ -static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) -{ - pgoff_t pgoff = off >> PAGE_SHIFT; - if (off != (resource_size_t) pgoff << PAGE_SHIFT) - return -EINVAL; - if (off + get_vm_size(vma) - 1 < off) - return -EINVAL; - vma->vm_pgoff = pgoff; - return 0; -} - /* * set up a mapping for shared memory segments */ @@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; - resource_size_t start, off; - unsigned long len, vma_len; /* This is broken because it assumes the MTD device is map-based and that mtd->priv is a valid struct map_info. It should be replaced with something that uses the mtd_get_unmapped_area() operation properly. */ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { - off = get_vm_offset(vma); - start = map->phys; - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); - start &= PAGE_MASK; - vma_len = get_vm_size(vma); - - /* Overflow in off+len? */ - if (vma_len + off < off) - return -EINVAL; - /* Does it fit in the mapping? */ - if (vma_len + off > len) - return -EINVAL; - - off += start; - /* Did that overflow? */ - if (off < start) - return -EINVAL; - if (set_vm_offset(vma, off) < 0) - return -EINVAL; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - #ifdef pgprot_noncached - if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - return 0; + return vm_iomap_memory(vma, map->phys, map->size); } return -ENOSYS; #else diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 43214151b882..42c63927609d 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, oobreadlen -= toread; } } + + if (chip->options & NAND_NEED_READRDY) { + /* Apply delay or wait for ready/busy pin */ + if (!chip->dev_ready) + udelay(chip->chip_delay); + else + nand_wait_ready(mtd); + } } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; @@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, len = min(len, readlen); buf = nand_transfer_oob(chip, buf, ops, len); + if (chip->options & NAND_NEED_READRDY) { + /* Apply delay or wait for ready/busy pin */ + if (!chip->dev_ready) + udelay(chip->chip_delay); + else + nand_wait_ready(mtd); + } + readlen -= len; if (!readlen) break; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index e3aa2748a6e7..9c612388e5de 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -22,49 +22,51 @@ * 512 512 Byte page size */ struct nand_flash_dev nand_flash_ids[] = { +#define SP_OPTIONS NAND_NEED_READRDY +#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) #ifdef CONFIG_MTD_NAND_MUSEUM_IDS - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 
0x2000, 0}, - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, - - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, + {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, + {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, + {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, + + {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, + {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, + {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, #endif - {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, - - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, + {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, + {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, + {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, + {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, + + {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, + {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, + {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, + {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, + + {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, + {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, + {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, + {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, + + {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, + {"NAND 128MiB 1,8V 16-bit", 0x72, 
512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, + {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, + + {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, /* * These are the new chips with large page size. The pagesize and the diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7bd068a6056a..dbbea0eec134 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); + netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_del(old_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); } if (new_active) { @@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); + netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_add(new_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); } } @@ -1746,6 +1750,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_compute_features(bond); + bond_update_speed_duplex(new_slave); + read_lock(&bond->lock); new_slave->last_arp_rx = jiffies - @@ -1798,8 +1804,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->link == BOND_LINK_DOWN ? "DOWN" : (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); - bond_update_speed_duplex(new_slave); - if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { @@ -1901,11 +1905,29 @@ err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); err_detach: + if (!USES_PRIMARY(bond->params.mode)) { + netif_addr_lock_bh(bond_dev); + bond_mc_list_flush(bond_dev, slave_dev); + netif_addr_unlock_bh(bond_dev); + } + bond_del_vlans_from_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); + if (bond->primary_slave == new_slave) + bond->primary_slave = NULL; write_unlock_bh(&bond->lock); + if (bond->curr_active_slave == new_slave) { + read_lock(&bond->lock); + write_lock_bh(&bond->curr_slave_lock); + bond_change_active_slave(bond, NULL); + bond_select_active_slave(bond); + write_unlock_bh(&bond->curr_slave_lock); + read_unlock(&bond->lock); + } + slave_disable_netpoll(new_slave); err_close: + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_unset_master: @@ -1964,7 +1986,6 @@ static int __bond_release_one(struct net_device *bond_dev, } block_netpoll_tx(); - call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -1977,12 +1998,11 @@ static int __bond_release_one(struct net_device *bond_dev, return -EINVAL; } + write_unlock_bh(&bond->lock); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. 
*/ netdev_rx_handler_unregister(slave_dev); - write_unlock_bh(&bond->lock); - synchronize_net(); write_lock_bh(&bond->lock); if (!all && !bond->params.fail_over_mac) { @@ -2066,8 +2086,10 @@ static int __bond_release_one(struct net_device *bond_dev, write_unlock_bh(&bond->lock); unblock_netpoll_tx(); - if (bond->slave_cnt == 0) + if (bond->slave_cnt == 0) { call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); + call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); + } bond_compute_features(bond); if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && @@ -2373,8 +2395,6 @@ static void bond_miimon_commit(struct bonding *bond) bond_set_backup_slave(slave); } - bond_update_speed_duplex(slave); - pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, slave->speed, slave->duplex ? "full" : "half"); @@ -3170,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { struct slave *slave = bond_slave_get_rtnl(slave_dev); - struct bonding *bond = slave->bond; - struct net_device *bond_dev = slave->bond->dev; + struct bonding *bond; + struct net_device *bond_dev; u32 old_speed; u8 old_duplex; + /* A netdev event can be generated while enslaving a device + * before netdev_rx_handler_register is called in which case + * slave will be NULL + */ + if (!slave) + return NOTIFY_DONE; + bond_dev = slave->bond->dev; + bond = slave->bond; + switch (event) { case NETDEV_UNREGISTER: if (bond->setup_by_slave) @@ -3288,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { - struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph; - struct ipv6hdr *ipv6h; + const struct ethhdr *data; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; u32 v6hash; - __be32 *s, *d; + const __be32 *s, *d; if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_network_may_pull(skb, sizeof(*iph))) { iph = ip_hdr(skb); + data = (struct ethhdr *)skb->data; return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_network_may_pull(skb, sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); + data = (struct ethhdr *)skb->data; s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); @@ -3320,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { u32 layer4_xor = 0; - struct iphdr *iph; - struct ipv6hdr *ipv6h; - __be32 *s, *d; - __be16 *layer4hdr; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; + const __be32 *s, *d; + const __be16 *l4 = NULL; + __be16 _l4[2]; + int noff = skb_network_offset(skb); + int poff; if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_may_pull(skb, noff + sizeof(*iph))) { iph = ip_hdr(skb); - if (!ip_is_fragment(iph) && - (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(iph->protocol); + + if (!ip_is_fragment(iph) && poff >= 0) { + l4 = 
skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_may_pull(skb, noff + sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); - if ((ipv6h->nexthdr == IPPROTO_TCP || - ipv6h->nexthdr == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)(ipv6h + 1); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(ipv6h->nexthdr); + if (poff >= 0) { + l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; @@ -4848,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net) static void __net_exit bond_net_exit(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); + struct bonding *bond, *tmp_bond; + LIST_HEAD(list); bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); + + /* Kill off any bonds created after unregistering bond rtnl ops */ + rtnl_lock(); + list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) + unregister_netdevice_queue(bond->dev, &list); + unregister_netdevice_many(&list); + rtnl_unlock(); } static struct pernet_operations bond_net_ops = { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1c9e09fbdff8..ea7a388f4843 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master, sprintf(linkname, "slave_%s", slave->name); ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), linkname); + + /* free the master link created earlier in case of error */ + if (ret) + sysfs_remove_link(&(slave->dev.kobj), "master"); + return ret; } @@ -522,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", + pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", bond->dev->name, new_value, INT_MAX); ret = -EINVAL; goto out; @@ -537,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; - if (bond->params.miimon) { - pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", - bond->dev->name, bond->dev->name); - bond->params.miimon = 0; - } - if (!bond->params.arp_targets[0]) { - pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", - bond->dev->name); + if (new_value) { + if (bond->params.miimon) { + pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", + bond->dev->name, bond->dev->name); + bond->params.miimon = 0; + } + if (!bond->params.arp_targets[0]) + pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", + bond->dev->name); } if (bond->dev->flags & IFF_UP) { /* If the interface is up, we may need to fire off @@ -552,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, * timer will get fired off when the open function * is called. 
*/ - cancel_delayed_work_sync(&bond->mii_work); - queue_delayed_work(bond->wq, &bond->arp_work, 0); + if (!new_value) { + cancel_delayed_work_sync(&bond->arp_work); + } else { + cancel_delayed_work_sync(&bond->mii_work); + queue_delayed_work(bond->wq, &bond->arp_work, 0); + } } - out: rtnl_unlock(); return ret; @@ -697,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; } else { @@ -752,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; } else { @@ -963,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 1, INT_MAX); + bond->dev->name, new_value, 0, INT_MAX); ret = -EINVAL; goto out; - } else { - pr_info("%s: Setting MII monitoring interval to %d.\n", - bond->dev->name, new_value); - bond->params.miimon = new_value; - if (bond->params.updelay) - pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.updelay * bond->params.miimon); - if (bond->params.downdelay) - pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.downdelay * bond->params.miimon); - if (bond->params.arp_interval) { - pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", - bond->dev->name); - bond->params.arp_interval = 0; - if (bond->params.arp_validate) { - bond->params.arp_validate = - BOND_ARP_VALIDATE_NONE; - } - } - - if (bond->dev->flags & IFF_UP) { - /* If the interface is up, we may need to fire off - * the MII timer. If the interface is down, the - * timer will get fired off when the open function - * is called. - */ + } + pr_info("%s: Setting MII monitoring interval to %d.\n", + bond->dev->name, new_value); + bond->params.miimon = new_value; + if (bond->params.updelay) + pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.updelay * bond->params.miimon); + if (bond->params.downdelay) + pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.downdelay * bond->params.miimon); + if (new_value && bond->params.arp_interval) { + pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", + bond->dev->name); + bond->params.arp_interval = 0; + if (bond->params.arp_validate) + bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; + } + if (bond->dev->flags & IFF_UP) { + /* If the interface is up, we may need to fire off + * the MII timer. If the interface is down, the + * timer will get fired off when the open function + * is called. 
+ */ + if (!new_value) { + cancel_delayed_work_sync(&bond->mii_work); + } else { cancel_delayed_work_sync(&bond->arp_work); queue_delayed_work(bond->wq, &bond->mii_work, 0); } diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f32b9fc6a983..9aa0c64c33c8 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net) struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; struct mcp251x_platform_data *pdata = spi->dev.platform_data; + unsigned long flags; int ret; ret = open_candev(net); @@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; + flags = IRQF_ONESHOT; + if (pdata->irq_flags) + flags |= pdata->irq_flags; + else + flags |= IRQF_TRIGGER_FALLING; + ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, - DEVICE_NAME, priv); + flags, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); if (pdata->transceiver_enable) diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7f..ff2ba86cd4a4 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -46,6 +46,7 @@ config CAN_EMS_PCI config CAN_PEAK_PCMCIA tristate "PEAK PCAN-PC Card" depends on PCMCIA + depends on HAS_IOPORT ---help--- This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) from PEAK-System (http://www.peak-system.com). To compile this diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260dc..3c18d7d000ed 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) */ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == REG_CR_BASICCAN_INITIAL && - (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && + (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) flag = 1; @@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) * See states on p. 23 of the Datasheet. 
*/ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && - priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && + priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) return flag; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc7..e4df307eaa90 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) */ spin_lock_irqsave(&priv->cmdreg_lock, flags); priv->write_reg(priv, REG_CMR, val); - priv->read_reg(priv, REG_SR); + priv->read_reg(priv, SJA1000_REG_SR); spin_unlock_irqrestore(&priv->cmdreg_lock, flags); } @@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { n++; - status = priv->read_reg(priv, REG_SR); + status = priv->read_reg(priv, SJA1000_REG_SR); /* check for absent controller due to hw unplug */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; @@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) /* receive interrupt */ while (status & SR_RBS) { sja1000_rx(dev); - status = priv->read_reg(priv, REG_SR); + status = priv->read_reg(priv, SJA1000_REG_SR); /* check for absent controller */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a510..aa48e053da27 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -56,7 +56,7 @@ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ #define REG_MOD 0x00 #define REG_CMR 0x01 -#define REG_SR 0x02 +#define SJA1000_REG_SR 0x02 #define REG_IR 0x03 #define REG_IER 0x04 #define REG_ALC 0x0B diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 6433b81256cd..8e0c4a001939 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) struct net_device *dev; struct sja1000_priv *priv; struct resource res; - const u32 *prop; - int err, irq, res_size, prop_size; + u32 prop; + int err, irq, res_size; void __iomem *base; err = of_address_to_resource(np, 0, &res); @@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->read_reg = sja1000_ofp_read_reg; priv->write_reg = sja1000_ofp_write_reg; - prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->can.clock.freq = *prop / 2; + err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); + if (!err) + priv->can.clock.freq = prop / 2; else priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ - prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->ocr |= *prop & OCR_MODE_MASK; + err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); + if (!err) + priv->ocr |= prop & OCR_MODE_MASK; else priv->ocr |= OCR_MODE_NORMAL; /* default */ - prop = of_get_property(np, "nxp,tx-output-config", &prop_size); - if (prop && (prop_size == sizeof(u32))) - priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; + err = of_property_read_u32(np, "nxp,tx-output-config", &prop); + if (!err) + priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; else priv->ocr |= 
OCR_TX0_PULLDOWN; /* default */ - prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); - if (prop && (prop_size == sizeof(u32)) && *prop) { - u32 divider = priv->can.clock.freq * 2 / *prop; + err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); + if (!err && prop) { + u32 divider = priv->can.clock.freq * 2 / prop; if (divider > 1) priv->cdr |= divider / 2 - 1; @@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->cdr |= CDR_CLK_OFF; /* default */ } - prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); - if (!prop) + if (!of_property_read_bool(np, "nxp,no-comparator-bypass")) priv->cdr |= CDR_CBP; /* default */ priv->irq_flags = IRQF_SHARED; diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index cab306a9888e..e1d26433d619 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev) struct ei_device *ei_local; struct ax_device *ax; struct resource *irq, *mem, *mem2; - resource_size_t mem_size, mem2_size = 0; + unsigned long mem_size, mem2_size = 0; int ret = 0; dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0d..b5fd934585e9 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { /* how about 0x2000 */ #define MAX_TX_BUF_LEN 0x2000 #define MAX_TX_BUF_SHIFT 13 -/*#define MAX_TX_BUF_LEN 0x3000 */ +#define MAX_TSO_SEG_SIZE 0x3c00 /* rrs word 1 bit 0:31 */ #define RRS_RX_CSUM_MASK 0xFFFF @@ -438,7 +438,6 @@ struct atl1e_adapter { struct atl1e_hw hw; struct atl1e_hw_stats hw_stats; - bool have_msi; u32 wol; u16 link_speed; u16 link_duplex; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..ac25f05ff68f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); - - if (adapter->have_msi) - pci_disable_msi(adapter->pdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; - int flags = 0; int err = 0; - adapter->have_msi = true; - err = pci_enable_msi(pdev); - if (err) { - netdev_dbg(netdev, - "Unable to allocate MSI interrupt Error: %d\n", err); - adapter->have_msi = false; - } - - if (!adapter->have_msi) - flags |= IRQF_SHARED; - err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, + netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); - if (adapter->have_msi) - pci_disable_msi(pdev); return err; } netdev_dbg(netdev, "atl1e_request_irq OK\n"); @@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a923bc4d5a1f..57619dd4a92b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } } + /* initialize FW coalescing state machines in RAM */ + bnx2x_update_coalesce(bp); + /* setup the leading queue */ rc = bnx2x_setup_leading(bp); if (rc) { @@ -2760,6 +2763,7 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); + bnx2x_del_all_napi(bp); /* clear pf_load status, as it was already set */ if (IS_PF(bp)) @@ -4579,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); - u16 flags = REG_RD16(bp, addr); + u8 flags = REG_RD8(bp, addr); /* clear and set */ flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; - REG_WR16(bp, addr, flags); + REG_WR8(bp, addr, flags); DP(NETIF_MSG_IFUP, "port %x fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 568205436a15..91ecd6a00d05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) break; default: BNX2X_ERR("Non valid capability ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); @@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) break; default: BNX2X_ERR("Non valid TC-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } return rval; @@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) return -EINVAL; } -static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); @@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, break; default: BNX2X_ERR("Non valid featrue-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } return rval; @@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, break; default: BNX2X_ERR("Non valid featrue-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); - rval = -EINVAL; + rval = 1; } return rval; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 31c5787970db..0283f343b0d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC6, &rx_tx_in_reset); - if (!rx_tx_in_reset) { + if ((!rx_tx_in_reset) && + (params->link_flags & + PHY_INITIALIZED)) { bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_sfi(phy, params); 
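A note on the storm_memset_hc_disable() hunk above, which narrows the flags access from REG_RD16/REG_WR16 to REG_RD8/REG_WR8: the HC_ENABLED flags occupy a single byte, and a 16-bit read-modify-write also rewrites the byte sharing that word, which is presumably the hazard the change avoids. The stand-alone C sketch below reproduces that hazard with an invented two-byte layout; the struct and field names are illustrative and not taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two independent byte-wide fields that share one 16-bit word, loosely
 * mirroring a status-block layout where the flags byte has a neighbour
 * owned by other code. Layout and names are invented for this sketch. */
struct sb_word {
        uint8_t flags;      /* the field we want to update */
        uint8_t neighbour;  /* a field updated elsewhere */
};

int main(void)
{
        struct sb_word sb = { .flags = 0x01, .neighbour = 0x10 };
        uint16_t wide;

        /* 16-bit read-modify-write: snapshot both bytes... */
        memcpy(&wide, &sb, sizeof(wide));

        /* ...meanwhile the neighbouring field is updated elsewhere... */
        sb.neighbour = 0x20;

        /* ...and the wide write-back silently reverts that update. */
        ((uint8_t *)&wide)[0] |= 0x02;  /* set a flag bit in the snapshot */
        memcpy(&sb, &wide, sizeof(wide));
        printf("16-bit RMW: flags=%#x neighbour=%#x (neighbour's update lost)\n",
               sb.flags, sb.neighbour);

        /* Byte-wide read-modify-write touches only the intended field. */
        sb.neighbour = 0x20;
        uint8_t flags = sb.flags;
        flags |= 0x04;
        sb.flags = flags;
        printf(" 8-bit RMW: flags=%#x neighbour=%#x (neighbour preserved)\n",
               sb.flags, sb.neighbour);
        return 0;
}

The byte-wide variant leaves the neighbouring field untouched, which is the direction the hunk moves in.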
bnx2x_warpcore_reset_lane(bp, phy, 0); @@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = PHY_INITIALIZED; /* Driver opens NIG-BRB filters */ bnx2x_set_rx_filter(params, 1); /* Check if link flap can be avoided */ @@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params, struct bnx2x *bp = params->bp; vars->link_up = 0; vars->phy_flags = 0; + params->link_flags &= ~PHY_INITIALIZED; if (!params->lfa_base) return bnx2x_link_reset(params, vars, 1); /* @@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params, vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; bnx2x_update_link_attr(params, vars->link_attr_sync); + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; /* Restart AN on leading lane */ bnx2x_warpcore_restart_AN_KR(phy, params); } @@ -13431,11 +13437,24 @@ static void bnx2x_check_kr2_wa(struct link_params *params, { struct bnx2x *bp = params->bp; u16 base_page, next_page, not_kr2_device, lane; - int sigdet = bnx2x_warpcore_get_sigdet(phy, params); + int sigdet; + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * since some switches tend to reinit the AN process and clear the + * advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } + + sigdet = bnx2x_warpcore_get_sigdet(phy, params); if (!sigdet) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No sigdet\n"); + } return; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index be5c195d03dd..56c2aae4e2c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -309,6 +309,7 @@ struct link_params { req_flow_ctrl is set to AUTO */ u16 link_flags; #define LINK_FLAGS_INT_DISABLED (1<<0) +#define PHY_INITIALIZED (1<<1) u32 lfa_base; }; @@ -342,7 +343,8 @@ struct link_vars { u32 link_status; u32 eee_status; u8 fault_detected; - u8 rsrv1; + u8 check_kr2_recovery_cnt; +#define CHECK_KR2_RECOVERY_CNT 5 u16 periodic_flags; #define PERIODIC_FLAGS_LINK_EVENT 0x0001 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..c50696b396f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) q); } - if (!NO_FCOE(bp)) { + if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { fp = &bp->fp[FCOE_IDX(bp)]; queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; @@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); } } + if (!CHIP_IS_E1x(bp)) + /* block FW from writing to host */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + /* wait until BRB is empty */ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { @@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); + bp->cnic_enabled = false; kfree(bp->cnic_kwq); 
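The bnx2x_disable_kr2()/bnx2x_check_kr2_wa() hunks above arm a hold-off counter (CHECK_KR2_RECOVERY_CNT, decremented once per periodic check) so that KR2 recovery is not attempted for roughly five polls after it was disabled, avoiding the disable/recover ping-pong described in the new comment. A minimal user-space sketch of that hold-off pattern, with illustrative names that are not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define RECOVERY_HOLDOFF_POLLS 5    /* plays the role of CHECK_KR2_RECOVERY_CNT */

struct link_state {
        bool feature_enabled;
        unsigned int holdoff;       /* polls to skip before trying recovery */
};

static void feature_disable(struct link_state *st)
{
        st->feature_enabled = false;
        st->holdoff = RECOVERY_HOLDOFF_POLLS;   /* arm the hold-off */
}

static void periodic_check(struct link_state *st, bool partner_ready)
{
        if (st->holdoff > 0) {      /* still inside the quiet period */
                st->holdoff--;
                return;
        }
        if (!st->feature_enabled && partner_ready)
                st->feature_enabled = true;     /* safe to recover now */
}

int main(void)
{
        struct link_state st = { .feature_enabled = true };
        int poll;

        feature_disable(&st);
        for (poll = 1; poll <= 7; poll++) {
                periodic_check(&st, true);
                printf("poll %d: enabled=%d holdoff=%u\n",
                       poll, st.feature_enabled, st.holdoff);
        }
        return 0;
}

Running it shows the feature staying down for five polls and recovering on the sixth, which is the debounce behaviour the work-around is after.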
bp->cnic_kwq = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 364e37ecbc5c..198f6f1c9ad5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old { #define UPDATE_QSTAT(s, t) \ do { \ - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ + + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ } while (0) #define UPDATE_QSTAT_OLD(f) \ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index fdb9b5655414..17a972734ba7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp) tg3_ump_link_report(tp); } + + tp->link_up = netif_carrier_ok(tp->dev); } static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) @@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) return err; } -static void tg3_carrier_on(struct tg3 *tp) -{ - netif_carrier_on(tp->dev); - tp->link_up = true; -} - static void tg3_carrier_off(struct tg3 *tp) { netif_carrier_off(tp->dev); @@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp) return -EBUSY; if (netif_running(tp->dev) && tp->link_up) { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); tg3_link_report(tp); } @@ -4134,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp) tp->link_config.active_speed = tp->link_config.speed; tp->link_config.active_duplex = tp->link_config.duplex; + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + /* With autoneg disabled, 5715 only links up when the + * advertisement register has the configured speed + * enabled. + */ + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); + } + bmcr = 0; switch (tp->link_config.speed) { default: @@ -4262,9 +4266,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) { if (curr_link_up != tp->link_up) { if (curr_link_up) { - tg3_carrier_on(tp); + netif_carrier_on(tp->dev); } else { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } @@ -14600,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp) if (j + len > block_end) goto partno; - memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + if (len >= sizeof(tp->fw_ver)) + len = sizeof(tp->fw_ver) - 1; + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, + &vpd_data[j]); } partno: diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b5973..b0ebc9f6d55e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -163,6 +163,7 @@ #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ /* XGMAC_INT_STAT reg */ +#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ @@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev) writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + /* Mask power mgt interrupt */ + writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); + /* XGMAC requires AXI bus init. This is a 'magic number' for now */ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); @@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) struct sk_buff *skb; int frame_len; + if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) + break; + entry = priv->rx_tail; p = priv->dma_rx + entry; if (desc_get_owner(p)) @@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) unsigned int pmt = 0; if (mode & WAKE_MAGIC) - pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; if (mode & WAKE_UCAST) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4ce62031f62f..8049268ce0f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_BASE 0 #define VPD_LEN 512 +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { u32 cclk_param, cclk_val; - int i, ret; + int i, ret, addr; int ec, sn; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); + if (ret < 0) + goto out; + addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; + + ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); if (ret < 0) goto out; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d13..9eada8e86078 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) tmp = readl(reg); } +/* + * Sleep, either by using msleep() or if we are suspending, then + * use mdelay() to sleep. 
+ */ +static void dm9000_msleep(board_info_t *db, unsigned int ms) +{ + if (db->in_suspend) + mdelay(ms); + else + msleep(ms); +} + +/* Read a word from phyxcer */ +static int +dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_save; + int ret; + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Issue phyxcer read command */ + iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait read complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ + + /* The read data keeps on REG_0D & REG_0E */ + ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); + return ret; +} + +/* Write a word to phyxcer */ +static void +dm9000_phy_write(struct net_device *dev, + int phyaddr_unused, int reg, int value) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned long reg_save; + + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Fill the written data into REG_0D & REG_0E */ + iow(db, DM9000_EPDRL, value); + iow(db, DM9000_EPDRH, value >> 8); + + /* Issue phyxcer write command */ + iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait write complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + + spin_unlock_irqrestore(&db->lock, flags); + mutex_unlock(&db->addr_lock); +} + /* dm9000_set_io * * select the specified set of io routines to use with the @@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev) iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ + ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; /* if wol is needed, then always set NCR_WAKEEN otherwise we end @@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev) return 0; } -/* - * Sleep, either by using msleep() or if we are suspending, then - * use mdelay() to sleep. 
- */ -static void dm9000_msleep(board_info_t *db, unsigned int ms) -{ - if (db->in_suspend) - mdelay(ms); - else - msleep(ms); -} - -/* - * Read a word from phyxcer - */ -static int -dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned int reg_save; - int ret; - - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock,flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock,flags); - - dm9000_msleep(db, 1); /* Wait read complete */ - - spin_lock_irqsave(&db->lock,flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ - - /* The read data keeps on REG_0D & REG_0E */ - ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock,flags); - - mutex_unlock(&db->addr_lock); - - dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); - return ret; -} - -/* - * Write a word to phyxcer - */ -static void -dm9000_phy_write(struct net_device *dev, - int phyaddr_unused, int reg, int value) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned long reg_save; - - dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock,flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - /* Fill the written data into REG_0D & REG_0E */ - iow(db, DM9000_EPDRL, value); - iow(db, DM9000_EPDRH, value >> 8); - - iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock, flags); - - dm9000_msleep(db, 1); /* Wait write complete */ - - spin_lock_irqsave(&db->lock,flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - - spin_unlock_irqrestore(&db->lock, flags); - mutex_unlock(&db->addr_lock); -} - static void dm9000_shutdown(struct net_device *dev) { @@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev) db->flags |= DM9000_PLATF_SIMPLE_PHY; #endif - dm9000_reset(db); + /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), + * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo + * while probe stage. + */ + + iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); /* try multiple times, DM9000 sometimes gets the read wrong */ for (i = 0; i < 8; i++) { diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3ef..9ce058adabab 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h @@ -69,7 +69,9 @@ #define NCR_WAKEEN (1<<6) #define NCR_FCOL (1<<4) #define NCR_FDX (1<<3) -#define NCR_LBK (3<<1) + +#define NCR_RESERVED (3<<1) +#define NCR_MAC_LBK (1<<1) #define NCR_RST (1<<0) #define NSR_SPEED (1<<7) @@ -167,5 +169,12 @@ #define ISR_LNKCHNG (1<<5) #define ISR_UNDERRUN (1<<4) +/* Davicom MII registers. 
+ */ + +#define MII_DM_DSPCR 0x1b /* DSP Control Register */ + +#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ + #endif /* _DM9000X_H_ */ diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 0c37fb2cc867..1df33c799c00 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -108,6 +108,7 @@ config TULIP_DM910X config DE4X5 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" depends on (PCI || EISA) + depends on VIRT_TO_BUS || ALPHA || PPC || SPARC select CRC32 ---help--- This is support for the DIGITAL series of PCI/EISA Ethernet cards. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 28ceb8414185..29aff55f2eea 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -349,6 +349,7 @@ struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; + u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 071aea79d218..3c9b4f12e3e5 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) return 0; } -static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) +static u16 be_POST_stage_get(struct be_adapter *adapter) { u32 sem; - u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH : - SLIPORT_SEMAPHORE_OFFSET_BE; - pci_read_config_dword(adapter->pdev, reg, &sem); - *stage = sem & POST_STAGE_MASK; - - if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK) - return -1; + if (BEx_chip(adapter)) + sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); else - return 0; + pci_read_config_dword(adapter->pdev, + SLIPORT_SEMAPHORE_OFFSET_SH, &sem); + + return sem & POST_STAGE_MASK; } int lancer_wait_ready(struct be_adapter *adapter) @@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { - status = be_POST_stage_get(adapter, &stage); - if (status) { - dev_err(dev, "POST error; stage=0x%x\n", stage); - return -1; - } else if (stage != POST_STAGE_ARMFW_RDY) { - if (msleep_interruptible(2000)) { - dev_err(dev, "Waiting for POST aborted\n"); - return -EINTR; - } - timeout += 2; - } else { + stage = be_POST_stage_get(adapter); + if (stage == POST_STAGE_ARMFW_RDY) return 0; + + dev_info(dev, "Waiting for POST, %ds elapsed\n", + timeout); + if (msleep_interruptible(2000)) { + dev_err(dev, "Waiting for POST aborted\n"); + return -EINTR; } + timeout += 2; } while (timeout < 60); dev_err(dev, "POST timeout; stage=0x%x\n", stage); diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 541d4530d5bf..62dc220695f7 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -32,8 +32,8 @@ #define MPU_EP_CONTROL 0 /********** MPU semphore: used for SH & BE *************/ -#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c -#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 +#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ #define POST_STAGE_MASK 0x0000FFFF #define POST_ERR_MASK 0x1 #define POST_ERR_SHIFT 31 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c 
b/drivers/net/ethernet/emulex/benet/be_main.c index 3860888ac711..2886c9b63f90 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (vlan_tx_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); - __vlan_put_tag(skb, vlan_tag); - skb->vlan_tci = 0; + skb = __vlan_put_tag(skb, vlan_tag); + if (skb) + skb->vlan_tci = 0; } return skb; @@ -3688,6 +3689,8 @@ static void be_netdev_init(struct net_device *netdev) static void be_unmap_pci_bars(struct be_adapter *adapter) { + if (adapter->csr) + pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); } @@ -3721,6 +3724,12 @@ static int be_map_pci_bars(struct be_adapter *adapter) adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> SLI_INTF_IF_TYPE_SHIFT; + if (BEx_chip(adapter) && be_physfn(adapter)) { + adapter->csr = pci_iomap(adapter->pdev, 2, 0); + if (adapter->csr == NULL) + return -ENOMEM; + } + addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); if (addr == NULL) goto pci_map_err; @@ -4329,6 +4338,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) pci_restore_state(pdev); /* Check if card is ok and fw is ready */ + dev_info(&adapter->pdev->dev, + "Waiting for FW to be ready after EEH reset\n"); status = be_fw_wait_ready(adapter); if (status) return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 069a155d16ed..73195f643c9c 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct bufdesc *bdp; + unsigned int i; + + /* Initialize the receive buffer descriptors. */ + bdp = fep->rx_bd_base; + for (i = 0; i < RX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + + fep->cur_rx = fep->rx_bd_base; + + /* ...and the same for transmit */ + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + for (i = 0; i < TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + bdp->cbd_sc = 0; + if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + fep->dirty_tx = bdp; +} + /* This function is called to start or restart the FEC during a link * change. This only happens when switching between half and full * duplex. @@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); + fec_enet_bd_init(ndev); + /* Set receive and transmit descriptor base. 
*/ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) @@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); - fep->cur_rx = fep->rx_bd_base; for (i = 0; i <= TX_RING_MOD_MASK; i++) { if (fep->tx_skbuff[i]) { @@ -934,24 +982,29 @@ static void fec_enet_adjust_link(struct net_device *ndev) goto spin_unlock; } - /* Duplex link change */ if (phy_dev->link) { - if (fep->full_duplex != phy_dev->duplex) { - fec_restart(ndev, phy_dev->duplex); - /* prevent unnecessary second fec_restart() below */ + if (!fep->link) { fep->link = phy_dev->link; status_change = 1; } - } - /* Link on or off change */ - if (phy_dev->link != fep->link) { - fep->link = phy_dev->link; - if (phy_dev->link) + if (fep->full_duplex != phy_dev->duplex) + status_change = 1; + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* if any of the above changed restart the FEC */ + if (status_change) fec_restart(ndev, phy_dev->duplex); - else + } else { + if (fep->link) { fec_stop(ndev); - status_change = 1; + fep->link = phy_dev->link; + status_change = 1; + } } spin_unlock: @@ -1328,7 +1381,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) static void fec_enet_free_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int i; + unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -1352,7 +1405,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) static int fec_enet_alloc_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int i; + unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -1437,6 +1490,7 @@ fec_enet_close(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); /* Don't know what to do yet. */ + napi_disable(&fep->napi); fep->opened = 0; netif_stop_queue(ndev); fec_stop(ndev); @@ -1592,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc *cbd_base; - struct bufdesc *bdp; - int i; /* Allocate memory for buffer descriptors. */ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, @@ -1603,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; } + memset(cbd_base, 0, PAGE_SIZE); spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1626,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; - for (i = 0; i < TX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. 
*/ - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; - fec_restart(ndev, 0); return 0; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f5390071efd0..eb4372962839 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -240,6 +240,7 @@ struct fec_enet_private { phy_interface_t phy_interface; int link; int full_duplex; + int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; int bufdesc_ex; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 1f17ca0f2201..0d8df400a479 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) spin_unlock_irqrestore(&fep->tmreg_lock, flags); } +EXPORT_SYMBOL(fec_ptp_start_cyclecounter); /** * fec_ptp_adjfreq - adjust ptp cycle frequency @@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } +EXPORT_SYMBOL(fec_ptp_ioctl); /** * fec_time_keep - call timecounter_read every second to avoid timer overrun @@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) pr_info("registered PHC device on %s\n", ndev->name); } } +EXPORT_SYMBOL(fec_ptp_init); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7e..d2bea3f07c73 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -870,7 +870,7 @@ err_unlock: } static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, - void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) { struct cb *cb; unsigned long flags; @@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, nic->cbs_avail--; cb->skb = skb; + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + if (unlikely(!nic->cbs_avail)) err = -ENOSPC; - cb_prepare(nic, cb, skb); /* Order is important otherwise we'll be in a race with h/w: * set S-bit in current first, then clear S-bit in previous. */ @@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic) nic->mii.mdio_write = mdio_write; } -static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct config *config = &cb->u.config; u8 *c = (u8 *)config; @@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); + return 0; } /************************************************************************* @@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic) return fw; } -static void e100_setup_ucode(struct nic *nic, struct cb *cb, +static int e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) { const struct firmware *fw = (void *)skb; @@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 
0xFFFF : 0xFF80); cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; } static inline int e100_load_ucode_wait(struct nic *nic) @@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic) return err; } -static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, struct sk_buff *skb) { cb->command = cpu_to_le16(cb_iaaddr); memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; } -static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) { cb->command = cpu_to_le16(cb_dump); cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + offsetof(struct mem, dump_buf)); + return 0; } static int e100_phy_check_without_mii(struct nic *nic) @@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic) return 0; } -static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct net_device *netdev = nic->netdev; struct netdev_hw_addr *ha; @@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, ETH_ALEN); } + return 0; } static void e100_set_multicast_list(struct net_device *netdev) @@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data) round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); } -static void e100_xmit_prepare(struct nic *nic, struct cb *cb, +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, struct sk_buff *skb) { + dma_addr_t dma_addr; cb->command = nic->tx_command; + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) + return -ENOMEM; + /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for * testing, ie sending frames with bad CRC. @@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, cb->u.tcb.tcb_byte_count = 0; cb->u.tcb.threshold = nic->tx_threshold; cb->u.tcb.tbd_count = 1; - cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE)); - /* check for mapping failure? 
*/ + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); cb->u.tcb.tbd.size = cpu_to_le16(skb->len); skb_tx_timestamp(skb); + return 0; } static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4e..ffd287196bf8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) txdr->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); tx_desc->lower.data = cpu_to_le32(skb->len); tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | @@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), GFP_KERNEL); if (!rxdr->buffer_info) { - ret_val = 4; + ret_val = 5; goto err_nomem; } @@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, GFP_KERNEL); if (!rxdr->desc) { - ret_val = 5; + ret_val = 6; goto err_nomem; } memset(rxdr->desc, 0, rxdr->size); @@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); if (!skb) { - ret_val = 6; + ret_val = 7; goto err_nomem; } skb_reserve(skb, NET_IP_ALIGN); @@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); memset(skb->data, 0x00, skb->len); } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 2c1813737f6d..f91a8f3f9d48 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -36,6 +36,7 @@ #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/mdio.h> +#include <linux/pm_runtime.h> #include "e1000.h" @@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, return 0; } +static int e1000e_ethtool_begin(struct net_device *netdev) +{ + return pm_runtime_get_sync(netdev->dev.parent); +} + +static void e1000e_ethtool_complete(struct net_device *netdev) +{ + pm_runtime_put_sync(netdev->dev.parent); +} + static const struct ethtool_ops e1000_ethtool_ops = { + .begin = e1000e_ethtool_begin, + .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index dff7bff8b8e0..121a865c7fbd 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -782,6 +782,59 @@ release: } /** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. 
Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. + **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + s32 ret_val = 0; + + if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { + u16 kmrn_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + return ret_val; +} + +/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * @@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; @@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + if (hw->phy.type == e1000_phy_i217) { - u16 phy_reg; + u16 phy_reg, device_id = hw->adapter->pdev->device; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + u32 fextnvm6 = er32(FEXTNVM6); + + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index b6d3174d7d2d..8bf4655c2e17 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -92,6 +92,8 @@ #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a177b8b65c44..7e615e2bf7e6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -848,11 +848,16 @@ check_page: } } - if (!buffer_info->dma) + if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, buffer_info->page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); @@ -4303,6 +4308,7 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); adapter->idle_check = true; + hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); /* fire a link status change interrupt to start the watchdog */ @@ -4662,6 +4668,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) 
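The e1000e receive-refill hunk above now publishes a descriptor only after dma_mapping_error() confirms the page mapping succeeded; on failure it bumps alloc_rx_buff_failed and stops refilling so a later pass can retry. Below is a small stand-alone C sketch of that fill-loop shape, with malloc() standing in for the DMA mapping; the ring layout and names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8
#define BUF_LEN   2048

struct rx_desc {
        void *buf;      /* stand-in for the mapped receive buffer */
};

static unsigned long alloc_rx_buff_failed;

static int refill_ring(struct rx_desc *ring, int n)
{
        int i, filled = 0;

        for (i = 0; i < n; i++) {
                if (ring[i].buf) {              /* already populated */
                        filled++;
                        continue;
                }

                void *buf = malloc(BUF_LEN);    /* the fallible step */
                if (!buf) {
                        alloc_rx_buff_failed++; /* account the failure... */
                        break;                  /* ...and retry on a later pass */
                }
                ring[i].buf = buf;              /* publish only after success */
                filled++;
        }
        return filled;
}

int main(void)
{
        struct rx_desc ring[RING_SIZE] = { { 0 } };
        int i, filled = refill_ring(ring, RING_SIZE);

        printf("filled %d/%d descriptors, failures so far: %lu\n",
               filled, RING_SIZE, alloc_rx_buff_failed);
        for (i = 0; i < RING_SIZE; i++)
                free(ring[i].buf);
        return 0;
}

Checking the mapping before the descriptor is handed out means hardware is never given a buffer address that was never valid.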
(adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; + pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); @@ -4672,6 +4679,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); + pm_runtime_put_sync(&adapter->pdev->dev); } else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults @@ -5887,8 +5895,7 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5912,10 +5919,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, } e1000e_reset_interrupt_capability(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; - status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5971,13 +5974,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, ew32(WUFC, 0); } - *enable_wake = !!wufc; - - /* make sure adapter isn't asleep if manageability is enabled */ - if ((adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - *enable_wake = true; - if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -5986,27 +5982,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, */ e1000e_release_hw_control(adapter); - pci_disable_device(pdev); - - return 0; -} - -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) -{ - if (sleep && wake) { - pci_prepare_to_sleep(pdev); - return; - } - - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, - bool wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); + pci_clear_master(pdev); /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To @@ -6021,12 +5997,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - e1000_power_off(pdev, sleep, wake); + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); - } else { - e1000_power_off(pdev, sleep, wake); } + + return 0; } #ifdef CONFIG_PCIEASPM @@ -6084,9 +6061,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); + pci_set_master(pdev); e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { @@ -6152,14 +6127,8 @@ static int __e1000_resume(struct pci_dev *pdev) static int e1000_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - retval = __e1000_shutdown(pdev, &wake, false); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); - - return retval; + return __e1000_shutdown(pdev, false); } static int e1000_resume(struct device *dev) @@ -6182,13 +6151,10 @@ static int e1000_runtime_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (e1000e_pm_ready(adapter)) { - bool wake; - - __e1000_shutdown(pdev, &wake, true); - } + if (!e1000e_pm_ready(adapter)) + return 0; - return 0; + return __e1000_shutdown(pdev, true); } static int e1000_idle(struct device *dev) @@ -6226,12 +6192,7 @@ static int e1000_runtime_resume(struct device *dev) static void e1000_shutdown(struct pci_dev *pdev) { - bool wake = false; - - __e1000_shutdown(pdev, &wake, false); - - if (system_state == SYSTEM_POWER_OFF) - e1000_complete_shutdown(pdev, false, wake); + __e1000_shutdown(pdev, false); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -6352,9 +6313,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - pci_set_master(pdev); pdev->state_saved = true; pci_restore_state(pdev); + pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -6783,7 +6744,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 794fe1497666..a7e6a3e37257 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -42,6 +42,7 @@ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c 
b/drivers/net/ethernet/intel/igb/e1000_82575.c index 84e7e0909def..12b1d8480808 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) switch (hw->phy.type) { case e1000_phy_i210: case e1000_phy_m88: - if (hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); - else + break; + default: ret_val = igb_copper_link_setup_m88(hw); + break; + } break; case e1000_phy_igp_3: ret_val = igb_copper_link_setup_igp(hw); @@ -1813,27 +1818,32 @@ out: **/ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) { - u32 dtxswc; + u32 reg_val, reg_offset; switch (hw->mac.type) { case e1000_82576: + reg_offset = E1000_DTXSWC; + break; case e1000_i350: - dtxswc = rd32(E1000_DTXSWC); - if (enable) { - dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - /* The PF can spoof - it has to in order to - * support emulation mode NICs */ - dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); - } else { - dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - } - wr32(E1000_DTXSWC, dtxswc); + reg_offset = E1000_TXSWC; break; default: - break; + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); } + wr32(reg_offset, reg_val); } /** diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d27edbc63923..ab577a763a20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -284,18 +284,10 @@ struct igb_q_vector { enum e1000_ring_flags_t { IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, - IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_DETECT_HANG }; -#define ring_uses_build_skb(ring) \ - test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -#define set_ring_build_skb_enabled(ring) \ - set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) -#define clear_ring_build_skb_enabled(ring) \ - clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) - #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) #define IGB_RX_DESC(R, i) \ @@ -447,7 +439,7 @@ struct igb_adapter { #endif struct i2c_algo_bit_data i2c_algo; struct i2c_adapter i2c_adap; - struct igb_i2c_client_list *i2c_clients; + struct i2c_client *i2c_client; }; #define IGB_FLAG_HAS_MSI (1 << 0) diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0a9b073d0b03..0478a1abe541 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -39,6 +39,10 @@ #include <linux/pci.h> #ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + /* hwmon callback functions */ static ssize_t igb_hwmon_show_location(struct device *dev, struct device_attribute *attr, @@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter) unsigned int i; int n_attrs; int rc = 0; + struct i2c_client *client = NULL; /* If this method 
isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) @@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter) if (rc) goto exit; + /* init i2c_client */ + client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); + if (client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device..\n"); + goto exit; + } + adapter->i2c_client = client; + /* Allocation space for max attributes * max num sensors * values (loc, temp, max, caution) */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ed79a1c53b59..64f75291e3a5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) return; } -static const struct i2c_board_info i350_sensor_info = { - I2C_BOARD_INFO("i350bb", 0Xf8), -}; - /* igb_init_i2c - Init I2C interface * @adapter: pointer to adapter structure * @@ -2546,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter) if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; - igb_enable_sriov(pdev, max_vfs); pci_sriov_set_totalvfs(pdev, 7); + igb_enable_sriov(pdev, max_vfs); #endif /* CONFIG_PCI_IOV */ } @@ -2656,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter) if (max_vfs > 7) { dev_warn(&pdev->dev, "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; + max_vfs = adapter->vfs_allocated_count = 7; } else adapter->vfs_allocated_count = max_vfs; if (adapter->vfs_allocated_count) @@ -3354,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, wr32(E1000_RXDCTL(reg_idx), rxdctl); } -static void igb_set_rx_buffer_len(struct igb_adapter *adapter, - struct igb_ring *rx_ring) -{ -#define IGB_MAX_BUILD_SKB_SIZE \ - (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ - (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) - - /* set build_skb flag */ - if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) - set_ring_build_skb_enabled(rx_ring); - else - clear_ring_build_skb_enabled(rx_ring); -} - /** * igb_configure_rx - Configure receive Unit after Reset * @adapter: board private structure @@ -3387,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *rx_ring = adapter->rx_ring[i]; - igb_set_rx_buffer_len(adapter, rx_ring); - igb_configure_rx_ring(adapter, rx_ring); - } + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, adapter->rx_ring[i]); } /** @@ -6207,85 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return igb_can_reuse_rx_page(rx_buffer, page, truesize); } -static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) -{ - struct igb_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; - void *page_addr; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); -#if (PAGE_SIZE < 8192) - unsigned int truesize = IGB_RX_BUFSZ; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(NET_SKB_PAD + - NET_IP_ALIGN + - size); -#endif - - /* If we spanned a buffer we have a huge mess so test for it */ - BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - - /* Guarantee this function can be used by verifying buffer sizes */ - 
BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD + - NET_IP_ALIGN + - IGB_TS_HDR_LEN + - ETH_FRAME_LEN + - ETH_FCS_LEN)); - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); - - page_addr = page_address(page) + rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); -#endif - - /* build an skb to around the page buffer */ - skb = build_skb(page_addr, truesize); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_failed++; - return NULL; - } - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - IGB_RX_BUFSZ, - DMA_FROM_DEVICE); - - /* update pointers within the skb to store the data */ - skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); - __skb_put(skb, size); - - /* pull timestamp out of packet data */ - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); - __skb_pull(skb, IGB_TS_HDR_LEN); - } - - if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { - /* hand second half of page back to the ring */ - igb_reuse_rx_page(rx_ring, rx_buffer); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->dma = 0; - rx_buffer->page = NULL; - - return skb; -} - static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -6701,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rmb(); /* retrieve a buffer from the ring */ - if (ring_uses_build_skb(rx_ring)) - skb = igb_build_rx_buffer(rx_ring, rx_desc); - else - skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); /* exit if we failed to retrieve a buffer */ if (!skb) @@ -6791,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; } -static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) -{ - if (ring_uses_build_skb(rx_ring)) - return NET_SKB_PAD + NET_IP_ALIGN; - else - return 0; -} - /** * igb_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure @@ -6825,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + - bi->page_offset + - igb_rx_offset(rx_ring)); + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; @@ -7724,67 +7611,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) } } -static DEFINE_SPINLOCK(i2c_clients_lock); - -/* igb_get_i2c_client - returns matching client - * in adapters's client list. - * @adapter: adapter struct - * @dev_addr: device address of i2c needed. 
- */ -static struct i2c_client * -igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr) -{ - ulong flags; - struct igb_i2c_client_list *client_list; - struct i2c_client *client = NULL; - struct i2c_board_info client_info = { - I2C_BOARD_INFO("igb", 0x00), - }; - - spin_lock_irqsave(&i2c_clients_lock, flags); - client_list = adapter->i2c_clients; - - /* See if we already have an i2c_client */ - while (client_list) { - if (client_list->client->addr == (dev_addr >> 1)) { - client = client_list->client; - goto exit; - } else { - client_list = client_list->next; - } - } - - /* no client_list found, create a new one */ - client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC); - if (client_list == NULL) - goto exit; - - /* dev_addr passed to us is left-shifted by 1 bit - * i2c_new_device call expects it to be flush to the right. - */ - client_info.addr = dev_addr >> 1; - client_info.platform_data = adapter; - client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info); - if (client_list->client == NULL) { - dev_info(&adapter->pdev->dev, - "Failed to create new i2c device..\n"); - goto err_no_client; - } - - /* insert new client at head of list */ - client_list->next = adapter->i2c_clients; - adapter->i2c_clients = client_list; - - client = client_list->client; - goto exit; - -err_no_client: - kfree(client_list); -exit: - spin_unlock_irqrestore(&i2c_clients_lock, flags); - return client; -} - /* igb_read_i2c_byte - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read @@ -7798,7 +7624,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = 0; @@ -7835,7 +7661,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = E1000_SWFW_PHY0_SM; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0987822359f0..0a237507ee85 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter) case e1000_82576: snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 1000000000; + adapter->ptp_caps.max_adj = 999999881; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea4808373435..b5f94abe3cff 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -2159,6 +2159,10 @@ map_skb: skb->data, adapter->rx_buffer_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } rx_desc = IXGB_RX_DESC(*rx_ring, i); rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); @@ -2168,7 +2172,8 @@ map_skb: rx_desc->status = 0; - if (++i == rx_ring->count) i = 0; + if (++i == rx_ring->count) + i = 0; buffer_info = &rx_ring->buffer_info[i]; } diff 
--git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..79f4a26ea6cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void) ixgbe_dbg_init(); #endif /* CONFIG_DEBUG_FS */ + ret = pci_register_driver(&ixgbe_driver); + if (ret) { +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_exit(); +#endif /* CONFIG_DEBUG_FS */ + return ret; + } + #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif - ret = pci_register_driver(&ixgbe_driver); - return ret; + return 0; } module_init(ixgbe_init_module); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268c..97e33669c0b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c3db6cd69b68..2b6cb5ca48ee 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -944,9 +944,17 @@ free_queue_irqs: free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; + /* This failure is non-recoverable - it indicates the system is + * out of MSIX vector resources and the VF driver cannot run + * without them. Set the number of msix vectors to zero + * indicating that not enough can be allocated. The error + * will be returned to the user indicating device open failed. + * Any further attempts to force the driver to open will also + * fail. The only way to recover is to unload the driver and + * reload it again. If the system has recovered some MSIX + * vectors then it may succeed. + */ + adapter->num_msix_vectors = 0; return err; } @@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int err; + /* A previous failure to open the device because of a lack of + * available MSIX vector resources may have reset the number + * of msix vectors variable to zero. The only way to recover + * is to unload/reload the driver and hope that the system has + * been able to recover some MSIX vector resources. 
+ */ + if (!adapter->num_msix_vectors) + return -ENOMEM; + /* disallow open during test */ if (test_bit(__IXGBEVF_TESTING, &adapter->state)) return -EBUSY; @@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev) err_req_irq: ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); err_setup_rx: ixgbevf_free_all_rx_resources(adapter); err_setup_tx: diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a2127489af7..bfdb06860397 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev) return 0; err_free: - kfree(dev); + free_netdev(dev); err_out: return err; } diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..434e33c527df 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -33,6 +33,7 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" + select PHYLIB ---help--- This driver supports the MDIO interface found in the network interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, @@ -45,7 +46,6 @@ config MVMDIO config MVNETA tristate "Marvell Armada 370/XP network interface support" depends on MACH_ARMADA_370_XP - select PHYLIB select MVMDIO ---help--- This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 29140502b71a..6562c736a1d8 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq) /* mii management interface *************************************************/ +static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) +{ + u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); + u32 autoneg_disable = FORCE_LINK_PASS | + DISABLE_AUTO_NEG_SPEED_GMII | + DISABLE_AUTO_NEG_FOR_FLOW_CTRL | + DISABLE_AUTO_NEG_FOR_DUPLEX; + + if (mp->phy->autoneg == AUTONEG_ENABLE) { + /* enable auto negotiation */ + pscr &= ~autoneg_disable; + goto out_write; + } + + pscr |= autoneg_disable; + + if (mp->phy->speed == SPEED_1000) { + /* force gigabit, half duplex not supported */ + pscr |= SET_GMII_SPEED_TO_1000; + pscr |= SET_FULL_DUPLEX_MODE; + goto out_write; + } + + pscr &= ~SET_GMII_SPEED_TO_1000; + + if (mp->phy->speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + else + pscr &= ~SET_MII_SPEED_TO_100; + + if (mp->phy->duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + else + pscr &= ~SET_FULL_DUPLEX_MODE; + +out_write: + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) { struct mv643xx_eth_shared_private *msp = dev_id; @@ -1499,6 +1538,7 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; if (mp->phy == NULL) return -EINVAL; @@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ cmd->advertising &= ~ADVERTISED_1000baseT_Half; - return phy_ethtool_sset(mp->phy, cmd); + ret = phy_ethtool_sset(mp->phy, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static void mv643xx_eth_get_drvinfo(struct net_device *dev, @@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev) static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mv643xx_eth_private *mp 
= netdev_priv(dev); + int ret; - if (mp->phy != NULL) - return phy_mii_ioctl(mp->phy, ifr, cmd); + if (mp->phy == NULL) + return -ENOTSUPP; - return -EOPNOTSUPP; + ret = phy_mii_ioctl(mp->phy, ifr, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..a47a097c21e1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -374,7 +374,6 @@ static int rxq_number = 8; static int txq_number = 8; static int rxq_def; -static int txq_def; #define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@ -1475,7 +1474,8 @@ error: static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; struct netdev_queue *nq; int frags = 0; @@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; frags = skb_shinfo(skb)->nr_frags + 1; - nq = netdev_get_tx_queue(dev, txq_def); + nq = netdev_get_tx_queue(dev, txq_id); /* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); @@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev) return -EINVAL; } - dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); + dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); if (!dev) return -ENOMEM; @@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->priv_flags |= IFF_UNICAST_FLT; + err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); goto err_deinit; } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->priv_flags |= IFF_UNICAST_FLT; - netdev_info(dev, "mac: %pM\n", dev->dev_addr); platform_set_drvdata(pdev, pp->dev); @@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); module_param(txq_number, int, S_IRUGO); module_param(rxq_def, int, S_IRUGO); -module_param(txq_def, int, S_IRUGO); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); - tp = space - 2048/8; + tp = space - 8192/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2074,7 +2074,7 @@ enum { GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ -#define GMAC_DEF_MSK GM_IS_TX_FF_UR +#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) }; /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON 
only) */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 7e64033d7de3..0706623cfb96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb4d8d99f36d..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) { - unsigned int i; - for (i = ETH_ALEN - 1; i; --i) { + int i; + for (i = ETH_ALEN - 1; i >= 0; --i) { dst_mac[i] = src_mac & 0xff; src_mac >>= 8; } @@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int qpn = priv->base_qpn; - u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); - - en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", - priv->dev->dev_addr); - mlx4_unregister_mac(dev, priv->port, mac); + u64 mac; - if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + } else { struct mlx4_mac_entry *entry; struct hlist_node *tmp; struct hlist_head *bucket; - unsigned int mac_hash; + unsigned int i; - mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; - bucket = &priv->mac_hash[mac_hash]; - hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - if (ether_addr_equal_64bits(entry->mac, - priv->dev->dev_addr)) { - en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", - priv->port, priv->dev->dev_addr, qpn); + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + mac = mlx4_en_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, qpn, entry->reg_id); - mlx4_qp_release_range(dev, qpn, 1); + mlx4_unregister_mac(dev, priv->port, mac); hlist_del_rcu(&entry->hlist); kfree_rcu(entry, rcu); - break; } } + + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", + priv->port, qpn); + mlx4_qp_release_range(dev, qpn, 1); + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; } } @@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr) return mac; } -static int mlx4_en_set_mac(struct net_device *dev, void *addr) +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) { - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - queue_work(mdev->workqueue, &priv->mac_task); - return 0; -} - -static void mlx4_en_do_set_mac(struct work_struct *work) -{ - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - mac_task); - struct mlx4_en_dev *mdev = priv->mdev; int err = 0; - mutex_lock(&mdev->state_lock); if (priv->port_up) { /* Remove old MAC 
and insert the new one */ err = mlx4_en_replace_mac(priv, priv->base_qpn, @@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work) } else en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + return err; +} + +static int mlx4_en_set_mac(struct net_device *dev, void *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + + mutex_lock(&mdev->state_lock); + err = mlx4_en_do_set_mac(priv); mutex_unlock(&mdev->state_lock); + + return err; } static void mlx4_en_clear_list(struct net_device *dev) @@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { - queue_work(mdev->workqueue, &priv->mac_task); + mlx4_en_do_set_mac(priv); mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; } mutex_unlock(&mdev->state_lock); @@ -1632,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + /* Remove flow steering rules for the port*/ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ASSERT_RTNL(); + list_for_each_entry_safe(flow, tmp_flow, + &priv->ethtool_list, list) { + mlx4_flow_detach(mdev->dev, flow->id); + list_del(&flow->list); + } + } + mlx4_en_destroy_drop_qp(priv); /* Free TX Rings */ @@ -1652,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) mdev->mac_removed[priv->port] = 1; - /* Remove flow steering rules for the port*/ - if (mdev->dev->caps.steering_mode == - MLX4_STEERING_MODE_DEVICE_MANAGED) { - ASSERT_RTNL(); - list_for_each_entry_safe(flow, tmp_flow, - &priv->ethtool_list, list) { - mlx4_flow_detach(mdev->dev, flow->id); - list_del(&flow->list); - } - } - /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); @@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } #endif return 0; @@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 251ae2f93116..8e3123a1df88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_slave_event_eq_info *event_eq = priv->mfunc.master.slave_state[slave].event_eq; u32 in_modifier = vhcr->in_modifier; - u32 eqn = in_modifier & 0x1FF; + u32 eqn = 
in_modifier & 0x3FF; u64 in_param = vhcr->in_param; int err = 0; int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 50917eb3013e..f6245579962d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + /* turn off device-managed steering capability if not enabled */ + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(field, outbox->buf, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d180bc46826a..16abde20e1fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cf883345af88..d738454116a0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); static inline void set_param_l(u64 *arg, u32 val) { - *((u32 *)arg) = val; + *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; } static inline void set_param_h(u64 *arg, u32 val) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c313d7e943a9..f710b7ce0dcb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -509,7 +509,6 @@ struct mlx4_en_priv { struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct mac_task; struct work_struct watchdog_task; struct work_struct linkstate_task; struct delayed_work stats_task; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 602ca9bf78e4..f91719a08cba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); @@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(¶m, index); @@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 
index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 1ac88637ad9d..00f223acada7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 719ead15e491..10c57c86388b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&out_param, port); @@ -361,7 +361,7 @@ out: int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -406,7 +406,7 @@ out: void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 81e2abe07bbb..e891b058c1be 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -319,7 +319,7 @@ err_out: static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(¶m, qpn); @@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, qpn); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 083fb48dc3d7..1391b52f443a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -99,6 +99,7 @@ struct res_qp { struct list_head mcg_list; spinlock_t mcg_spl; int local_qpn; + atomic_t ref_count; }; enum res_mtt_states { @@ -197,6 +198,7 @@ enum res_fs_rule_states { struct res_fs_rule { struct res_common com; + int qpn; }; static void *res_tracker_lookup(struct rb_root *root, u64 res_id) @@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } -static void *find_res(struct mlx4_dev *dev, int res_id, +static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -447,6 +449,7 @@ static struct res_common 
*alloc_qp_tr(int id) ret->local_qpn = id; INIT_LIST_HEAD(&ret->mcg_list); spin_lock_init(&ret->mcg_spl); + atomic_set(&ret->ref_count, 0); return &ret->com; } @@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id) return &ret->com; } -static struct res_common *alloc_fs_rule_tr(u64 id) +static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) { struct res_fs_rule *ret; @@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id) ret->com.res_id = id; ret->com.state = RES_FS_RULE_ALLOCATED; - + ret->qpn = qpn; return &ret->com; } @@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, ret = alloc_xrcdn_tr(id); break; case RES_FS_RULE: - ret = alloc_fs_rule_tr(id); + ret = alloc_fs_rule_tr(id, extra); break; default: return NULL; @@ -671,10 +674,14 @@ undo: static int remove_qp_ok(struct res_qp *res) { - if (res->com.state == RES_QP_BUSY) + if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || + !list_empty(&res->mcg_list)) { + pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", + res->com.state, atomic_read(&res->ref_count)); return -EBUSY; - else if (res->com.state != RES_QP_RESERVED) + } else if (res->com.state != RES_QP_RESERVED) { return -EPERM; + } return 0; } @@ -2990,6 +2997,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, u8 steer_type_mask = 2; enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; + if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0) + return -EINVAL; + qpn = vhcr->in_modifier & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) @@ -3121,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; int err; int qpn; + struct res_qp *rqp; struct mlx4_net_trans_rule_hw_ctrl *ctrl; struct _rule_hw *rule_header; int header_id; @@ -3131,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; - err = get_res(dev, slave, qpn, RES_QP, NULL); + err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); return err; @@ -3172,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, if (err) goto err_put; - err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { mlx4_err(dev, "Fail to add flow steering resources.\n "); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + goto err_put; } + atomic_inc(&rqp->ref_count); err_put: put_res(dev, slave, qpn, RES_QP); return err; @@ -3192,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { int err; + struct res_qp *rqp; + struct res_fs_rule *rrule; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) return -EOPNOTSUPP; + err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); + if (err) + return err; + /* Release the rule form busy state before removal */ + put_res(dev, slave, vhcr->in_param, RES_FS_RULE); + err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); + if (err) + return err; + err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { mlx4_err(dev, "Fail to remove flow steering resources.\n "); - return 
err; + goto out; } err = mlx4_cmd(dev, vhcr->in_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (!err) + atomic_dec(&rqp->ref_count); +out: + put_res(dev, slave, rrule->qpn, RES_QP); return err; } @@ -3803,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); /*VLAN*/ rem_slave_macs(dev, slave); + rem_slave_fs_rule(dev, slave); rem_slave_qps(dev, slave); rem_slave_srqs(dev, slave); rem_slave_cqs(dev, slave); @@ -3811,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_mtts(dev, slave); rem_slave_counters(dev, slave); rem_slave_xrcdns(dev, slave); - rem_slave_fs_rule(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index feda6c00829f..e329fe1f11b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, srqn); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) for (; rxfc != 0; rxfc--) { rxh = ks8851_rdreg32(ks, KS_RXFHSR); rxstat = rxh & 0xffff; - rxlen = rxh >> 16; + rxlen = (rxh >> 16) & 0xfff; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index c4122c86f829..efa29b712d5f 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, ndev); - if (lpc_mii_init(pldat) != 0) + ret = lpc_mii_init(pldat); + if (ret) goto err_out_unregister_netdev; netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 39ab4d09faaa..73ce7dd6b954 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) - skb->ip_summed = CHECKSUM_NONE; - else skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; napi_gro_receive(&adapter->napi, skb); (*work_done)++; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..edd63f1230f3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); + /* Make sure carrier is off and queue is stopped during loopback */ + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + ret = qlcnic_do_lb_test(adapter, mode); 
qlcnic_83xx_clear_lb_mode(adapter, mode); @@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) { struct qlcnic_cmd_args cmd; + struct net_device *netdev = adapter->netdev; int ret = 0; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); @@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_TX, &ret); if (ret) { - dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); + netdev_err(netdev, "Error getting Tx stats\n"); goto out; } /* Get MAC stats */ @@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_MAC, &ret); if (ret) { - dev_info(&adapter->pdev->dev, - "Error getting Rx stats\n"); + netdev_err(netdev, "Error getting MAC stats\n"); goto out; } /* Get Rx stats */ @@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_RX, &ret); if (ret) - dev_info(&adapter->pdev->dev, - "Error getting Tx stats\n"); + netdev_err(netdev, "Error getting Rx stats\n"); out: qlcnic_free_mbx_args(&cmd); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..5fa847fe388a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -358,8 +358,7 @@ set_flags: memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); } opcode = TX_ETHER_PKT; - if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { + if (skb_is_gso(skb)) { hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->total_hdr_length = hdr_len; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..5ef328af61d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -200,10 +200,10 @@ beacon_err: } err = qlcnic_config_led(adapter, b_state, b_rate); - if (!err) + if (!err) { err = len; - else ahw->beacon_state = b_state; + } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2fe..7e8d68263963 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.31" +#define DRV_VERSION "v1.00.00.32" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab23257..0780e039b271 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev, ecmd->supported = SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_EXTERNAL; if ((qdev->link_status & STS_LINK_TYPE_MASK) == STS_LINK_TYPE_10GBASET) { 
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); ecmd->port = PORT_TP; + ecmd->autoneg = AUTONEG_ENABLE; } else { ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..8033555e53c2 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1434,11 +1434,13 @@ map_error: } /* Categorizing receive firmware frame errors */ -static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) +static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, + struct rx_ring *rx_ring) { struct nic_stats *stats = &qdev->nic_stats; stats->rx_err_count++; + rx_ring->rx_errors++; switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { case IB_MAC_IOCB_RSP_ERR_CODE_ERR: @@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + put_page(lbq_desc->p.pg_chunk.page); + return; + } napi->dev = qdev->ndev; skb = napi_get_frags(napi); @@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, addr = lbq_desc->p.pg_chunk.va; prefetch(addr); + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + goto err_out; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, memcpy(skb_put(new_skb, length), skb->data, length); skb = new_skb; + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); @@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, return; } + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, QL_DUMP_IB_MAC_RSP(ib_mac_rsp); - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); - return (unsigned long)length; - } - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { /* The data and headers are split into * separate buffers. 
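The qlge hunks above stop dropping errored frames in one central place and instead test IB_MAC_IOCB_RSP_ERR_MASK at the top of each receive routine, bumping rx_errors and a per-category counter and freeing the buffer before any skb work is done. The following is a minimal user-space sketch of that check-mask-count-drop pattern; the flag values, mask, and counter names are invented for the illustration and are not the driver's real IB_MAC_IOCB_RSP_* definitions.

/*
 * Sketch only: mask the completion flags with an error mask, classify the
 * error, and tell the caller to drop the frame. Constants are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RSP_ERR_MASK     0x3c   /* hypothetical error-code field    */
#define RSP_ERR_CODE     0x04   /* hypothetical "code error"        */
#define RSP_ERR_OVERSIZE 0x08   /* hypothetical "oversize frame"    */

struct rx_stats {
	uint64_t rx_errors;
	uint64_t rx_code_err;
	uint64_t rx_oversize_err;
	uint64_t rx_other_err;
};

/* Returns true if the frame carries an error and must be dropped. */
static bool rx_frame_has_error(uint8_t flags2, struct rx_stats *stats)
{
	if (!(flags2 & RSP_ERR_MASK))
		return false;           /* clean frame, keep processing */

	stats->rx_errors++;
	switch (flags2 & RSP_ERR_MASK) {
	case RSP_ERR_CODE:
		stats->rx_code_err++;
		break;
	case RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	default:
		stats->rx_other_err++;
		break;
	}
	return true;                    /* caller frees the buffer and returns */
}

int main(void)
{
	struct rx_stats stats = { 0 };
	uint8_t completions[] = { 0x00, 0x04, 0x08, 0x10 };

	for (unsigned int i = 0; i < sizeof(completions); i++)
		if (rx_frame_has_error(completions[i], &stats))
			printf("dropped frame %u\n", i);

	printf("rx_errors=%llu\n", (unsigned long long)stats.rx_errors);
	return 0;
}

Doing the check per receive path, as the patch does, lets each path release its own buffer type (page chunk, skb, or napi frag) instead of relying on a single generic drop point.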
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; @@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) return false; - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - + rtl_speed_down(tp); rtl_wol_suspend_quirk(tp); return true; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33e96176e4d8..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) if (felic_stat & ECSR_LCHNG) { /* Link Changed */ if (mdp->cd->no_psr || mdp->no_ether_link) { - if (mdp->link == PHY_DOWN) - link_stat = 0; - else - link_stat = PHY_ST_LINK; + goto ignore_link; } else { link_stat = (sh_eth_read(ndev, PSR)); if (mdp->ether_link_active_low) @@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) } } +ignore_link: if (intr_status & EESR_TWB) { /* Write buck end. unused write back interrupt */ if (intr_status & EESR_TABT) /* Transmit Abort int */ @@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - u32 intr_status = 0; + unsigned long intr_status; spin_lock(&mdp->lock); - /* Get interrpt stat */ + /* Get interrupt status */ intr_status = sh_eth_read(ndev, EESR); + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_error() in order to quench + * it as it doesn't get cleared by just writing 1 to the ECI bit... 
+ */ + intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; /* Clear interrupt */ if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | @@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) struct phy_device *phydev = mdp->phydev; int new_state = 0; - if (phydev->link != PHY_DOWN) { + if (phydev->link) { if (phydev->duplex != mdp->duplex) { new_state = 1; mdp->duplex = phydev->duplex; @@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); } - if (mdp->link == PHY_DOWN) { + if (!mdp->link) { sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); new_state = 1; mdp->link = phydev->link; + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_enable(ndev); } } else if (mdp->link) { new_state = 1; - mdp->link = PHY_DOWN; + mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); } if (new_state && netif_msg_link(mdp)) @@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, mdp->mii_bus->id , mdp->phy_id); - mdp->link = PHY_DOWN; + mdp->link = 0; mdp->speed = 0; mdp->duplex = -1; @@ -2220,6 +2228,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) /* MDIO bus release function */ static int sh_mdio_release(struct net_device *ndev) { + struct sh_eth_private *mdp = netdev_priv(ndev); struct mii_bus *bus = dev_get_drvdata(&ndev->dev); /* unregister mdio bus */ @@ -2234,6 +2243,9 @@ static int sh_mdio_release(struct net_device *ndev) /* free bitbang info */ free_mdio_bitbang(bus); + /* free bitbang memory */ + kfree(mdp->bitbang); + return 0; } @@ -2262,6 +2274,7 @@ static int sh_mdio_init(struct net_device *ndev, int id, bitbang->ctrl.ops = &bb_ops; /* MII controller setting */ + mdp->bitbang = bitbang; mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!mdp->mii_bus) { ret = -ENOMEM; @@ -2441,6 +2454,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } mdp->tsu_addr = ioremap(rtsu->start, resource_size(rtsu)); + if (mdp->tsu_addr == NULL) { + ret = -ENOMEM; + dev_err(&pdev->dev, "TSU ioremap failed.\n"); + goto out_release; + } mdp->port = devno % 2; ndev->features = NETIF_F_HW_VLAN_FILTER; } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index bae84fd2e73a..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -705,6 +705,7 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; + struct bb_info *bitbang; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; @@ -722,7 +723,7 @@ struct sh_eth_private { u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ struct phy_device *phydev; /* PHY device control */ - enum phy_state link; + int link; phy_interface_t phy_interface; int msg_enable; int speed; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 50247dfe8f57..d2f790df6dcb 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) * TX scheduler is stopped when we're done and before * netif_device_present() becomes false. 
*/ - netif_tx_lock(dev); + netif_tx_lock_bh(dev); netif_device_detach(dev); - netif_tx_unlock(dev); + netif_tx_unlock_bh(dev); } #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 0ad790cc473c..eaa8e874a3cb 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) return false; tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 + && tx_queue->write_count - write_count == 1; } /* For each entry inserted into the software descriptor ring, create a diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 879ff5849bbd..bb579a6128c8 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->u.page = page; - rx_buf->page_offset = page_offset; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->flags = EFX_RX_BUF_PAGE; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d461..50617c5a0bdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 01ffbc486982..4781d3d8e182 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); cpts_tx_timestamp(priv->cpts, skb); priv->stats.tx_packets++; priv->stats.tx_bytes += len; @@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. 
*/ - if (unlikely(cpdma_check_free_tx_desc(priv->txch))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *mdio; parp = of_get_property(slave_node, "phy_id", &lenp); - if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { + if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { pr_err("Missing slave[%d] phy_id property\n", i); ret = -EINVAL; goto error_ret; @@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); if (data->dual_emac) { - if (of_property_read_u32(node, "dual_emac_res_vlan", + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { pr_err("Missing dual_emac_res_vlan in DT.\n"); slave_data->dual_emac_res_vlan = i+1; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 52c05366599a..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index e5b19b056909..3c4d6274bb9b 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out: + if (rrpriv->evt_ring) + pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring, + rrpriv->evt_ring_dma); if (rrpriv->rx_ring) pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, rrpriv->rx_ring_dma); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da50..f5f0f09e4cc5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device, packet->trans_id; /* Notify the layer above us */ - nvsc_packet->completion.send.send_completion( - nvsc_packet->completion.send.send_completion_ctx); + if (nvsc_packet) + nvsc_packet->completion.send.send_completion( + nvsc_packet->completion.send. 
+ send_completion_ctx); num_outstanding_sends = atomic_dec_return(&net_device->num_outstanding_sends); @@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device, int ret = 0; struct nvsp_message sendMessage; struct net_device *ndev; + u64 req_id; net_device = get_outbound_net_device(device); if (!net_device) @@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device, 0xFFFFFFFF; sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; + if (packet->completion.send.send_completion) + req_id = (u64)packet; + else + req_id = 0; + if (packet->page_buf_cnt) { ret = vmbus_sendpacket_pagebuffer(device->channel, packet->page_buf, packet->page_buf_cnt, &sendMessage, sizeof(struct nvsp_message), - (unsigned long)packet); + req_id); } else { ret = vmbus_sendpacket(device->channel, &sendMessage, sizeof(struct nvsp_message), - (unsigned long)packet, + req_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - } if (ret == 0) { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..8341b62e5521 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, if (status == 1) { netif_carrier_on(net); - netif_wake_queue(net); ndev_ctx = netdev_priv(net); schedule_delayed_work(&ndev_ctx->dwork, 0); schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); } else { netif_carrier_off(net); - netif_tx_disable(net); } } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..0775f0aefd1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -61,9 +61,6 @@ struct rndis_request { static void rndis_filter_send_completion(void *ctx); -static void rndis_filter_send_request_completion(void *ctx); - - static struct rndis_device *get_rndis_device(void) { @@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, packet->page_buf[0].len; } - packet->completion.send.send_completion_ctx = req;/* packet; */ - packet->completion.send.send_completion = - rndis_filter_send_request_completion; - packet->completion.send.send_completion_tid = (unsigned long)dev; + packet->completion.send.send_completion = NULL; ret = netvsc_send(dev->net_dev->dev, packet); return ret; @@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx) /* Pass it back to the original handler */ filter_pkt->completion(filter_pkt->completion_ctx); } - - -static void rndis_filter_send_request_completion(void *ctx) -{ - /* Noop */ -} diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 417b2af1aa80..73abbc1655d5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &macvlan_hard_header_ops, diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 37add21a3d7d..59ac143dec25 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this, goto done; spin_lock_irqsave(&target_list_lock, flags); +restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { @@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct 
notifier_block *this, case NETDEV_UNREGISTER: /* * rtnl_lock already held + * we might sleep in __netpoll_cleanup() */ - if (nt->np.dev) { - __netpoll_cleanup(&nt->np); - dev_put(nt->np.dev); - nt->np.dev = NULL; - } + spin_unlock_irqrestore(&target_list_lock, flags); + __netpoll_cleanup(&nt->np); + spin_lock_irqsave(&target_list_lock, flags); + dev_put(nt->np.dev); + nt->np.dev = NULL; nt->enabled = 0; stopped = true; - break; + netconsole_target_put(nt); + goto restart; } } netconsole_target_put(nt); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 05c5efe84591..bf3419297875 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_upper_dev_unlink(port_dev, dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2c6a22e278ea..729ed533bb33 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; skb_orphan(skb); + nf_reset(skb); + /* Enqueue packet */ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); @@ -1592,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (tun->flags & TUN_TAP_MQ && (tun->numqueues + tun->numdisabled > 1)) - return err; + return -EBUSY; } else { char *name; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3b6e9b83342d..7c769d8e25ad 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -268,7 +268,7 @@ config USB_NET_SMSC75XX select CRC16 select CRC32 help - This option adds support for SMSC LAN95XX based USB 2.0 + This option adds support for SMSC LAN75XX based USB 2.0 Gigabit Ethernet adapters. 
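Note on the netconsole NETDEV_UNREGISTER hunk above: the handler now drops target_list_lock around __netpoll_cleanup(), because that call may sleep, then re-takes the lock and restarts the whole list walk, since other contexts may have changed the list while it was unlocked. Below is a minimal userspace sketch of that same "unlock around a blocking call, then restart the walk" pattern; a pthread mutex and a plain singly linked list stand in for the spinlock and the kernel list, and blocking_cleanup(), struct target and the matches flag are invented for illustration only.

/* Sketch only: models the lock-drop-and-restart pattern from the
 * netconsole hunk above; not the driver's real code or types.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct target {
	int id;
	int matches;            /* stand-in for "nt->np.dev == dev" */
	struct target *next;
};

static struct target *target_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void blocking_cleanup(struct target *t)
{
	/* May sleep, so it must not run with list_lock held. */
	usleep(1000);
	printf("cleaned up target %d\n", t->id);
}

static void handle_unregister(void)
{
	struct target *t;

	pthread_mutex_lock(&list_lock);
restart:
	for (t = target_list; t; t = t->next) {
		if (!t->matches)
			continue;
		/* Drop the lock for the blocking call ... */
		pthread_mutex_unlock(&list_lock);
		blocking_cleanup(t);
		pthread_mutex_lock(&list_lock);
		t->matches = 0;
		/* ... and restart: the list may have changed meanwhile. */
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct target *t = calloc(1, sizeof(*t));
		t->id = i;
		t->matches = (i != 1);
		t->next = target_list;
		target_list = t;
	}
	handle_unregister();
	return 0;
}

The restart is what keeps the walk safe: once the lock has been released, no pointer captured before the release can be trusted, so the loop begins again from the (re-locked) list head and skips entries it has already handled.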
config USB_NET_SMSC95XX diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 248d2dc765a5..6bd91676d2cb 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) struct cdc_ncm_ctx *ctx; struct usb_driver *subdriver = ERR_PTR(-ENODEV); int ret = -ENODEV; - u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; + u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); struct cdc_mbim_state *info = (void *)&dev->data; - /* see if interface supports MBIM alternate setting */ - if (intf->num_altsetting == 2) { - if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) - usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM); - data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; - } - /* Probably NCM, defer for cdc_ncm_bind */ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; @@ -143,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb goto error; if (skb) { - if (skb->len <= sizeof(ETH_HLEN)) + if (skb->len <= ETH_HLEN) goto error; /* mapping VLANs to MBIM sessions: diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 61b74a2b89ac..4709fa3497cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -55,6 +55,14 @@ #define DRIVER_VERSION "14-Mar-2012" +#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) +static bool prefer_mbim = true; +#else +static bool prefer_mbim; +#endif +module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); + static void cdc_ncm_txpath_bh(unsigned long param); static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); @@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) } EXPORT_SYMBOL_GPL(cdc_ncm_unbind); -static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) +/* Select the MBIM altsetting iff it is preferred and available, + * returning the number of the corresponding data interface altsetting + */ +u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) { - int ret; + struct usb_host_interface *alt; /* The MBIM spec defines a NCM compatible default altsetting, * which we may have matched: @@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) * endpoint descriptors, shall be constructed according to * the rules given in section 6 (USB Device Model) of this * specification." 
- * - * Do not bind to such interfaces, allowing cdc_mbim to handle - * them */ -#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) - if ((intf->num_altsetting == 2) && - !usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM)) { - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) - return -ENODEV; - else - usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_NCM); + if (prefer_mbim && intf->num_altsetting == 2) { + alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); + if (alt && cdc_ncm_comm_intf_is_mbim(alt) && + !usb_set_interface(dev->udev, + intf->cur_altsetting->desc.bInterfaceNumber, + CDC_NCM_COMM_ALTSETTING_MBIM)) + return CDC_NCM_DATA_ALTSETTING_MBIM; } -#endif + return CDC_NCM_DATA_ALTSETTING_NCM; +} +EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); + +static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int ret; + + /* MBIM backwards compatible function? */ + cdc_ncm_select_altsetting(dev, intf); + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) + return -ENODEV; /* NCM data altsetting is always 1 */ ret = cdc_ncm_bind_common(dev, intf, 1); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index efb5c7c33a28..2a3579f67910 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> +#include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> @@ -52,6 +53,96 @@ struct qmi_wwan_state { struct usb_interface *data; }; +/* default ethernet address used by the modem */ +static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; + +/* Make up an ethernet header if the packet doesn't have one. + * + * A firmware bug common among several devices cause them to send raw + * IP packets under some circumstances. There is no way for the + * driver/host to know when this will happen. And even when the bug + * hits, some packets will still arrive with an intact header. + * + * The supported devices are only capably of sending IPv4, IPv6 and + * ARP packets on a point-to-point link. Any packet with an ethernet + * header will have either our address or a broadcast/multicast + * address as destination. ARP packets will always have a header. + * + * This means that this function will reliably add the appropriate + * header iff necessary, provided our hardware address does not start + * with 4 or 6. + * + * Another common firmware bug results in all packets being addressed + * to 00:a0:c6:00:00:00 despite the host address being different. + * This function will also fixup such packets. 
+ */ +static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + __be16 proto; + + /* usbnet rx_complete guarantees that skb->len is at least + * hard_header_len, so we can inspect the dest address without + * checking skb->len + */ + switch (skb->data[0] & 0xf0) { + case 0x40: + proto = htons(ETH_P_IP); + break; + case 0x60: + proto = htons(ETH_P_IPV6); + break; + case 0x00: + if (is_multicast_ether_addr(skb->data)) + return 1; + /* possibly bogus destination - rewrite just in case */ + skb_reset_mac_header(skb); + goto fix_dest; + default: + /* pass along other packets without modifications */ + return 1; + } + if (skb_headroom(skb) < ETH_HLEN) + return 0; + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + eth_hdr(skb)->h_proto = proto; + memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); +fix_dest: + memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); + return 1; +} + +/* very simplistic detection of IPv4 or IPv6 headers */ +static bool possibly_iphdr(const char *data) +{ + return (data[0] & 0xd0) == 0x40; +} + +/* disallow addresses which may be confused with IP headers */ +static int qmi_wwan_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + if (possibly_iphdr(addr->sa_data)) + return -EADDRNOTAVAIL; + eth_commit_mac_addr_change(dev, p); + return 0; +} + +static const struct net_device_ops qmi_wwan_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_set_mac_address = qmi_wwan_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + /* using a counter to merge subdriver requests with our own into a combined state */ static int qmi_wwan_manage_power(struct usbnet *dev, int on) { @@ -139,16 +230,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); - /* control and data is shared? */ - if (intf->cur_altsetting->desc.bNumEndpoints == 3) { - info->control = intf; - info->data = intf; - goto shared; - } - - /* else require a single interrupt status endpoint on control intf */ - if (intf->cur_altsetting->desc.bNumEndpoints != 1) - goto err; + /* set up initial state */ + info->control = intf; + info->data = intf; /* and a number of CDC descriptors */ while (len > 3) { @@ -207,25 +291,14 @@ next_desc: buf += h->bLength; } - /* did we find all the required ones? 
*/ - if (!(found & (1 << USB_CDC_HEADER_TYPE)) || - !(found & (1 << USB_CDC_UNION_TYPE))) { - dev_err(&intf->dev, "CDC functional descriptors missing\n"); - goto err; - } - - /* verify CDC Union */ - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { - dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); - goto err; - } - - /* need to save these for unbind */ - info->control = intf; - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); - if (!info->data) { - dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); - goto err; + /* Use separate control and data interfaces if we found a CDC Union */ + if (cdc_union) { + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { + dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", + cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); + goto err; + } } /* errors aren't fatal - we can live with the dynamic address */ @@ -235,17 +308,30 @@ next_desc: } /* claim data interface and set it up */ - status = usb_driver_claim_interface(driver, info->data, dev); - if (status < 0) - goto err; + if (info->control != info->data) { + status = usb_driver_claim_interface(driver, info->data, dev); + if (status < 0) + goto err; + } -shared: status = qmi_wwan_register_subdriver(dev); if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); } + /* Never use the same address on both ends of the link, even + * if the buggy firmware told us to. + */ + if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) + eth_hw_addr_random(dev->net); + + /* make MAC addr easily distinguishable from an IP header */ + if (possibly_iphdr(dev->net->dev_addr)) { + dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */ + dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ + } + dev->net->netdev_ops = &qmi_wwan_netdev_ops; err: return status; } @@ -324,6 +410,7 @@ static const struct driver_info qmi_wwan_info = { .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, }; #define HUAWEI_VENDOR_ID 0x12D1 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f22..1a15ec14c386 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); + int ret; + + if (new_mtu > MAX_SINGLE_PACKET_SIZE) + return -EINVAL; - int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); + ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; @@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); - ret = smsc75xx_set_rx_max_frame_length(dev, 1514); + ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; @@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { - /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 
- if (unlikely(size > (ETH_FRAME_LEN + 12))) { + /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 4aad350e4dae..eae7a03d4f9b 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->num_rx_queues = num_rx_queues; adapter->num_tx_queues = num_tx_queues; + adapter->rx_buf_per_pkt = 1; size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index a0feb17a0238..63a124340cbe 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev, VMXNET3_RX_RING_MAX_SIZE) return -EINVAL; + /* if adapter not yet initialized, do nothing */ + if (adapter->rx_buf_per_pkt == 0) { + netdev_err(netdev, "adapter not completely initialized, " + "ring size cannot be changed yet\n"); + return -EOPNOTSUPP; + } /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3198384689d9..35418146fa17 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -70,10 +70,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f10e58ac9c1b..7cee7a3068ec 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) iph->ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); tunnel_ip_select_ident(skb, old_iph, &rt->dst); + nf_reset(skb); + vxlan_set_owner(dev, skb); /* See iptunnel_xmit() */ @@ -1504,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net) static __net_exit void vxlan_exit_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_dev *vxlan; + unsigned h; + + rtnl_lock(); + for (h = 0; h < VNI_HASH_SIZE; ++h) + hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) + dev_close(vxlan->dev); + rtnl_unlock(); if (vn->sock) { sk_release_kernel(vn->sock->sk); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 4cc13940c895..f76c3ca07a45 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, AR_PHY_AGC_CONTROL_FLTR_CAL | AR_PHY_AGC_CONTROL_PKDET_CAL; + /* Use chip chainmask only for calibration */ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); if (rtt) { @@ -1150,6 +1151,9 @@ skip_tx_iqcal: ar9003_hw_rtt_disable(ah); } + /* Revert chainmask to runtime parameters */ + ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); + /* Initialize list pointers */ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 28fd99203f64..bdee2ed67219 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, - {0x00008264, 0x9bc00010}, + {0x00008264, 0x9d400010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 467b60014b7b..73fe8d6db566 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c @@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) u32 sz, i; struct channel_detector *cd; - cd = kmalloc(sizeof(*cd), GFP_KERNEL); + cd = kmalloc(sizeof(*cd), GFP_ATOMIC); if (cd == NULL) goto fail; INIT_LIST_HEAD(&cd->head); cd->freq = freq; sz = sizeof(cd->detectors) * dpd->num_radar_types; - cd->detectors = kzalloc(sz, GFP_KERNEL); + cd->detectors = kzalloc(sz, GFP_ATOMIC); if (cd->detectors == NULL) goto fail; diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c index 91b8dceeadb1..5e48c5515b8c 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c @@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) { struct pulse_elem *p = pool_get_pulse_elem(); if (p == NULL) { - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kmalloc(sizeof(*p), GFP_ATOMIC); if (p == NULL) { DFS_POOL_STAT_INC(pulse_alloc_error); return false; @@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, ps.deadline_ts = ps.first_ts + ps.dur; new_ps = pool_get_pseq_elem(); if (new_ps == NULL) { - new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); + new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC); if (new_ps == NULL) { 
DFS_POOL_STAT_INC(pseq_alloc_error); return false; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 716058b67557..a47f5e05fc04 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor != MINOR_VERSION_REQ) { + priv->fw_version_minor < MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index ade3afb21f91..7fdac6c7b3ea 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) - if (ATH_TXQ_SETUP(sc, i)) { - txq = &sc->tx.txq[i]; - ath_txq_lock(sc, txq); - if (txq->axq_depth) { - if (txq->axq_tx_inprogress) { - needreset = true; - ath_txq_unlock(sc, txq); - break; - } else { - txq->axq_tx_inprogress = true; - } + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + txq = sc->tx.txq_map[i]; + + ath_txq_lock(sc, txq); + if (txq->axq_depth) { + if (txq->axq_tx_inprogress) { + needreset = true; + ath_txq_unlock(sc, txq); + break; + } else { + txq->axq_tx_inprogress = true; } - ath_txq_unlock_complete(sc, txq); } + ath_txq_unlock_complete(sc, txq); + } if (needreset) { ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, @@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; - ieee80211_queue_work(sc->hw, &sc->hw_check_work); + if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) + ieee80211_queue_work(sc->hw, &sc->hw_check_work); } /* diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782b..988372d218a4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) if (r) { ath_err(common, "Unable to reset channel, reset status %d\n", r); + + ath9k_hw_enable_interrupts(ah); + ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); + goto out; } diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997ff..122146943bf2 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_dma_ops *ops; struct b43_dmaring *ring; struct b43_dmadesc_meta *meta; + static const struct b43_txstatus fake; /* filled with 0 */ + const struct b43_txstatus *txstat; int slot, firstused; bool frame_succeed; + int skip; + static u8 err_out1, err_out2; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; + + skip = 0; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in - * malfunction, memory leaks and/or stall of DMA functionality. */ - b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. 
" - "Expected %d, but got %d\n", - ring->index, firstused, slot); - return; + * malfunction, memory leaks and/or stall of DMA functionality. + */ + if (slot == next_slot(ring, next_slot(ring, firstused))) { + /* If a single header/data pair was missed, skip over + * the first two slots in an attempt to recover. + */ + slot = firstused; + skip = 2; + if (!err_out1) { + /* Report the error once. */ + b43dbg(dev->wl, + "Skip on DMA ring %d slot %d.\n", + ring->index, slot); + err_out1 = 1; + } + } else { + /* More than a single header/data pair were missed. + * Report this error once. + */ + if (!err_out2) + b43dbg(dev->wl, + "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + err_out2 = 1; + return; + } } ops = ring->ops; @@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, slot, firstused, ring->index); break; } + if (meta->skb) { struct b43_private_tx_info *priv_info = - b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); - unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { @@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { - /* This is a scatter-gather fragment of a frame, so - * the skb pointer must not be NULL. */ + /* This is a scatter-gather fragment of a frame, + * so the skb pointer must not be NULL. + */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); @@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* * Call back to inform the ieee80211 subsystem about - * the status of the transmission. + * the status of the transmission. When skipping over + * a missed TX status report, use a status structure + * filled with zeros to indicate that the frame was not + * sent (frame_count 0) and not acknowledged */ - frame_succeed = b43_fill_txstatus_report(dev, info, status); + if (unlikely(skip)) + txstat = &fake; + else + txstat = status; + + frame_succeed = b43_fill_txstatus_report(dev, info, + txstat); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; @@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; - if (meta->is_last_fragment) { + if (meta->is_last_fragment && !skip) { /* This is the last scatter-gather * fragment of the frame. We are done. 
*/ break; } slot = next_slot(ring, slot); + if (skip > 0) + --skip; } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c2..b70f220bc4b3 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) u16 clip_off[2] = { 0xFFFF, 0xFFFF }; u8 vcm_final = 0; - s8 offset[4]; + s32 offset[4]; s32 results[8][4] = { }; s32 results_min[4] = { }; s32 poll_results[4] = { }; @@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) } for (i = 0; i < 4; i += 2) { s32 curr; - s32 mind = 40; + s32 mind = 0x100000; s32 minpoll = 249; u8 minvcm = 0; if (2 * core != i) @@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) u8 regs_save_radio[2]; u16 regs_save_phy[2]; - s8 offset[4]; + s32 offset[4]; u8 core; u8 rail; @@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) } for (i = 0; i < 4; i++) { - s32 mind = 40; + s32 mind = 0x100000; u8 minvcm = 0; s32 minpoll = 249; s32 curr; @@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: - /* FIXME */ + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, + avoid); break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb3..35fc68be158d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) goto err; } - /* External image takes precedence if specified */ if (brcmf_sdbrcm_download_code_file(bus)) { brcmf_err("dongle image file download failed\n"); goto err; } - /* External nvram takes precedence if specified */ - if (brcmf_sdbrcm_download_nvram(bus)) + if (brcmf_sdbrcm_download_nvram(bus)) { brcmf_err("dongle nvram file download failed\n"); + goto err; + } /* Take arm out of reset */ if (brcmf_sdbrcm_download_state(bus, false)) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798d..78da3eff75e8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -1891,8 +1891,10 @@ static s32 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, const u8 *mac_addr, struct key_params *params) { + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_wsec_key key; s32 err = 0; + u8 keybuf[8]; memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; @@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(CONN, "Setting the key index %d\n", key.index); memcpy(key.data, params->key, key.len); - if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { - u8 keybuf[8]; + if ((ifp->vif->mode != WL_MODE_AP) && + (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { + brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, break; case WLAN_CIPHER_SUITE_TKIP: if (ifp->vif->mode != WL_MODE_AP) { - brcmf_dbg(CONN, 
"Swapping key\n"); + brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); @@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, err = -EAGAIN; goto done; } - switch (wsec & ~SES_OW_ENABLED) { - case WEP_ENABLED: + if (wsec & WEP_ENABLED) { sec = &profile->sec; if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { params.cipher = WLAN_CIPHER_SUITE_WEP40; @@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, params.cipher = WLAN_CIPHER_SUITE_WEP104; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); } - break; - case TKIP_ENABLED: + } else if (wsec & TKIP_ENABLED) { params.cipher = WLAN_CIPHER_SUITE_TKIP; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); - break; - case AES_ENABLED: + } else if (wsec & AES_ENABLED) { params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); - break; - default: + } else { brcmf_err("Invalid algo (0x%x)\n", wsec); err = -EINVAL; goto done; @@ -3824,8 +3823,9 @@ exit: static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); - s32 err = -EPERM; + s32 err; struct brcmf_fil_bss_enable_le bss_enable; + struct brcmf_join_params join_params; brcmf_dbg(TRACE, "Enter\n"); @@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) /* Due to most likely deauths outstanding we sleep */ /* first to make sure they get processed by fw. */ msleep(400); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); - if (err < 0) { - brcmf_err("setting AP mode failed %d\n", err); - goto exit; - } + + memset(&join_params, 0, sizeof(join_params)); + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, + &join_params, sizeof(join_params)); + if (err < 0) + brcmf_err("SET SSID error (%d)\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); - if (err < 0) { + if (err < 0) brcmf_err("BRCMF_C_UP error %d\n", err); - goto exit; - } + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); + if (err < 0) + brcmf_err("setting AP mode failed %d\n", err); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); + if (err < 0) + brcmf_err("setting INFRA mode failed %d\n", err); } else { bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); bss_enable.enable = cpu_to_le32(0); @@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); -exit: return err; } @@ -4124,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { }, { .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) - }, - { - .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, @@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE); + BIT(NL80211_IFTYPE_P2P_GO); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index c6451c61407a..e2340b231aa1 100644 --- 
a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br) } } +/** + * This function frees the WL per-device resources. + * + * This function frees resources owned by the WL device pointed to + * by the wl parameter. + * + * precondition: can both be called locked and unlocked + * + */ +static void brcms_free(struct brcms_info *wl) +{ + struct brcms_timer *t, *next; + + /* free ucode data */ + if (wl->fw.fw_cnt) + brcms_ucode_data_free(&wl->ucode); + if (wl->irq) + free_irq(wl->irq, wl); + + /* kill dpc */ + tasklet_kill(&wl->tasklet); + + if (wl->pub) { + brcms_debugfs_detach(wl->pub); + brcms_c_module_unregister(wl->pub, "linux", wl); + } + + /* free common resources */ + if (wl->wlc) { + brcms_c_detach(wl->wlc); + wl->wlc = NULL; + wl->pub = NULL; + } + + /* virtual interface deletion is deferred so we cannot spinwait */ + + /* wait for all pending callbacks to complete */ + while (atomic_read(&wl->callbacks) > 0) + schedule(); + + /* free timers */ + for (t = wl->timers; t; t = next) { + next = t->next; +#ifdef DEBUG + kfree(t->name); +#endif + kfree(t); + } +} + +/* +* called from both kernel as from this kernel module (error flow on attach) +* precondition: perimeter lock is not acquired. +*/ +static void brcms_remove(struct bcma_device *pdev) +{ + struct ieee80211_hw *hw = bcma_get_drvdata(pdev); + struct brcms_info *wl = hw->priv; + + if (wl->wlc) { + wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); + wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); + ieee80211_unregister_hw(hw); + } + + brcms_free(wl); + + bcma_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static void brcms_release_fw(struct brcms_info *wl) +{ + int i; + for (i = 0; i < MAX_FW_IMAGES; i++) { + release_firmware(wl->fw.fw_bin[i]); + release_firmware(wl->fw.fw_hdr[i]); + } +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. 
+ */ +static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) +{ + int status; + struct device *device = &pdev->dev; + char fw_name[100]; + int i; + + memset(&wl->fw, 0, sizeof(struct brcms_firmware)); + for (i = 0; i < MAX_FW_IMAGES; i++) { + if (brcms_firmwares[i] == NULL) + break; + sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + wl->fw.hdr_num_entries[i] = + wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); + } + wl->fw.fw_cnt = i; + status = brcms_ucode_data_init(wl, &wl->ucode); + brcms_release_fw(wl); + return status; +} + static void brcms_ops_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw) if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); + if (!wl->ucode.bcm43xx_bomminor) { + err = brcms_request_fw(wl, wl->wlc->hw->d11core); + if (err) { + brcms_remove(wl->wlc->hw->d11core); + return -ENOENT; + } + } + spin_lock_bh(&wl->lock); /* avoid acknowledging frames before a non-monitor device is added */ wl->mute_tx = true; @@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data) wake_up(&wl->tx_flush_wq); } -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) -{ - int status; - struct device *device = &pdev->dev; - char fw_name[100]; - int i; - - memset(&wl->fw, 0, sizeof(struct brcms_firmware)); - for (i = 0; i < MAX_FW_IMAGES; i++) { - if (brcms_firmwares[i] == NULL) - break; - sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - wl->fw.hdr_num_entries[i] = - wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); - } - wl->fw.fw_cnt = i; - return brcms_ucode_data_init(wl, &wl->ucode); -} - -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static void brcms_release_fw(struct brcms_info *wl) -{ - int i; - for (i = 0; i < MAX_FW_IMAGES; i++) { - release_firmware(wl->fw.fw_bin[i]); - release_firmware(wl->fw.fw_hdr[i]); - } -} - -/** - * This function frees the WL per-device resources. - * - * This function frees resources owned by the WL device pointed to - * by the wl parameter. 
- * - * precondition: can both be called locked and unlocked - * - */ -static void brcms_free(struct brcms_info *wl) -{ - struct brcms_timer *t, *next; - - /* free ucode data */ - if (wl->fw.fw_cnt) - brcms_ucode_data_free(&wl->ucode); - if (wl->irq) - free_irq(wl->irq, wl); - - /* kill dpc */ - tasklet_kill(&wl->tasklet); - - if (wl->pub) { - brcms_debugfs_detach(wl->pub); - brcms_c_module_unregister(wl->pub, "linux", wl); - } - - /* free common resources */ - if (wl->wlc) { - brcms_c_detach(wl->wlc); - wl->wlc = NULL; - wl->pub = NULL; - } - - /* virtual interface deletion is deferred so we cannot spinwait */ - - /* wait for all pending callbacks to complete */ - while (atomic_read(&wl->callbacks) > 0) - schedule(); - - /* free timers */ - for (t = wl->timers; t; t = next) { - next = t->next; -#ifdef DEBUG - kfree(t->name); -#endif - kfree(t); - } -} - -/* -* called from both kernel as from this kernel module (error flow on attach) -* precondition: perimeter lock is not acquired. -*/ -static void brcms_remove(struct bcma_device *pdev) -{ - struct ieee80211_hw *hw = bcma_get_drvdata(pdev); - struct brcms_info *wl = hw->priv; - - if (wl->wlc) { - wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); - wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - ieee80211_unregister_hw(hw); - } - - brcms_free(wl); - - bcma_set_drvdata(pdev, NULL); - ieee80211_free_hw(hw); -} - static irqreturn_t brcms_isr(int irq, void *dev_id) { struct brcms_info *wl; @@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock); - /* prepare ucode */ - if (brcms_request_fw(wl, pdev) < 0) { - wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " - "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); - brcms_release_fw(wl); - brcms_remove(pdev); - return NULL; - } - /* common load-time initialization */ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); - brcms_release_fw(wl); if (!wl->wlc) { wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", KBUILD_MODNAME, err); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a824232478..18d37645e2cd 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, gain0_15 = ((biq1 & 0xf) << 12) | ((tia & 0xf) << 8) | ((lna2 & 0x3) << 6) | - ((lna2 & 0x3) << 4) | - ((lna1 & 0x3) << 2) | - ((lna1 & 0x3) << 0); + ((lna2 & + 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); @@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, } mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); - mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); - mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); } @@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; } -static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, - u16 tia_gain, u16 lna2_gain) -{ - u32 i_thresh_l, q_thresh_l; - u32 i_thresh_h, q_thresh_h; - struct lcnphy_iq_est iq_est_h, iq_est_l; - - wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, - lna2_gain, 0); - - wlc_lcnphy_rx_gain_override_enable(pi, true); - wlc_lcnphy_start_tx_tone(pi, 
2000, (40 >> 1), 0); - udelay(500); - write_radio_reg(pi, RADIO_2064_REG112, 0); - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) - return false; - - wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); - udelay(500); - write_radio_reg(pi, RADIO_2064_REG112, 0); - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) - return false; - - i_thresh_l = (iq_est_l.i_pwr << 1); - i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; - - q_thresh_l = (iq_est_l.q_pwr << 1); - q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; - if ((iq_est_h.i_pwr > i_thresh_l) && - (iq_est_h.i_pwr < i_thresh_h) && - (iq_est_h.q_pwr > q_thresh_l) && - (iq_est_h.q_pwr < q_thresh_h)) - return true; - - return false; -} - static bool wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, const struct lcnphy_rx_iqcomp *iqcomp, @@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, rfoverride3_old, rfoverride3val_old, rfoverride4_old, rfoverride4val_old, afectrlovr_old, afectrlovrval_old; - int tia_gain, lna2_gain, biq1_gain; - bool set_gain; + int tia_gain; + u32 received_power, rx_pwr_threshold; u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; u16 values_to_save[11]; s16 *ptr; @@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, goto cal_done; } - WARN_ON(module != 1); - tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - - for (i = 0; i < 11; i++) - values_to_save[i] = - read_radio_reg(pi, rxiq_cal_rf_reg[i]); - Core1TxControl_old = read_phy_reg(pi, 0x631); - - or_phy_reg(pi, 0x631, 0x0015); - - RFOverride0_old = read_phy_reg(pi, 0x44c); - RFOverrideVal0_old = read_phy_reg(pi, 0x44d); - rfoverride2_old = read_phy_reg(pi, 0x4b0); - rfoverride2val_old = read_phy_reg(pi, 0x4b1); - rfoverride3_old = read_phy_reg(pi, 0x4f9); - rfoverride3val_old = read_phy_reg(pi, 0x4fa); - rfoverride4_old = read_phy_reg(pi, 0x938); - rfoverride4val_old = read_phy_reg(pi, 0x939); - afectrlovr_old = read_phy_reg(pi, 0x43b); - afectrlovrval_old = read_phy_reg(pi, 0x43c); - old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - - tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); - if (tx_gain_override_old) { - wlc_lcnphy_get_tx_gain(pi, &old_gains); - tx_gain_index_old = pi_lcn->lcnphy_current_index; - } - - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); + if (module == 1) { - mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); + tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); + wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); + for (i = 0; i < 11; i++) + values_to_save[i] = + read_radio_reg(pi, rxiq_cal_rf_reg[i]); + Core1TxControl_old = read_phy_reg(pi, 0x631); + + or_phy_reg(pi, 0x631, 0x0015); + + RFOverride0_old = read_phy_reg(pi, 0x44c); + RFOverrideVal0_old = read_phy_reg(pi, 0x44d); + rfoverride2_old = read_phy_reg(pi, 0x4b0); + rfoverride2val_old = read_phy_reg(pi, 0x4b1); + rfoverride3_old = read_phy_reg(pi, 0x4f9); + rfoverride3val_old = read_phy_reg(pi, 0x4fa); + rfoverride4_old = read_phy_reg(pi, 0x938); + rfoverride4val_old = read_phy_reg(pi, 0x939); + afectrlovr_old = read_phy_reg(pi, 0x43b); + afectrlovrval_old = read_phy_reg(pi, 0x43c); + old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); + old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); + + tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); 
+ if (tx_gain_override_old) { + wlc_lcnphy_get_tx_gain(pi, &old_gains); + tx_gain_index_old = pi_lcn->lcnphy_current_index; + } - write_radio_reg(pi, RADIO_2064_REG116, 0x06); - write_radio_reg(pi, RADIO_2064_REG12C, 0x07); - write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); - write_radio_reg(pi, RADIO_2064_REG098, 0x03); - write_radio_reg(pi, RADIO_2064_REG00B, 0x7); - mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); - write_radio_reg(pi, RADIO_2064_REG01D, 0x01); - write_radio_reg(pi, RADIO_2064_REG114, 0x01); - write_radio_reg(pi, RADIO_2064_REG02E, 0x10); - write_radio_reg(pi, RADIO_2064_REG12A, 0x08); - - mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); - mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); - mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); - write_phy_reg(pi, 0x6da, 0xffff); - or_phy_reg(pi, 0x6db, 0x3); + mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); - set_gain = false; - - lna2_gain = 3; - while ((lna2_gain >= 0) && !set_gain) { - tia_gain = 4; - - while ((tia_gain >= 0) && !set_gain) { - biq1_gain = 6; - - while ((biq1_gain >= 0) && !set_gain) { - set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, - (u16) - biq1_gain, - (u16) - tia_gain, - (u16) - lna2_gain); - biq1_gain -= 1; - } + write_radio_reg(pi, RADIO_2064_REG116, 0x06); + write_radio_reg(pi, RADIO_2064_REG12C, 0x07); + write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); + write_radio_reg(pi, RADIO_2064_REG098, 0x03); + write_radio_reg(pi, RADIO_2064_REG00B, 0x7); + mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); + write_radio_reg(pi, RADIO_2064_REG01D, 0x01); + write_radio_reg(pi, RADIO_2064_REG114, 0x01); + write_radio_reg(pi, RADIO_2064_REG02E, 0x10); + write_radio_reg(pi, RADIO_2064_REG12A, 0x08); + + mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); + mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); + + mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); + + wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); + write_phy_reg(pi, 0x6da, 0xffff); + or_phy_reg(pi, 0x6db, 0x3); + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); + wlc_lcnphy_rx_gain_override_enable(pi, true); + + tia_gain = 8; + rx_pwr_threshold = 950; + while (tia_gain > 0) { tia_gain -= 1; + wlc_lcnphy_set_rx_gain_by_distribution(pi, + 0, 0, 2, 2, + (u16) + tia_gain, 1, 0); + udelay(500); + + received_power = + wlc_lcnphy_measure_digital_power(pi, 2000); + if (received_power < rx_pwr_threshold) + break; } - lna2_gain -= 1; - } + result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); - if (set_gain) - result = wlc_lcnphy_calc_rx_iq_comp(pi, 
1024); - else - result = false; + wlc_lcnphy_stop_tx_tone(pi); - wlc_lcnphy_stop_tx_tone(pi); + write_phy_reg(pi, 0x631, Core1TxControl_old); - write_phy_reg(pi, 0x631, Core1TxControl_old); - - write_phy_reg(pi, 0x44c, RFOverrideVal0_old); - write_phy_reg(pi, 0x44d, RFOverrideVal0_old); - write_phy_reg(pi, 0x4b0, rfoverride2_old); - write_phy_reg(pi, 0x4b1, rfoverride2val_old); - write_phy_reg(pi, 0x4f9, rfoverride3_old); - write_phy_reg(pi, 0x4fa, rfoverride3val_old); - write_phy_reg(pi, 0x938, rfoverride4_old); - write_phy_reg(pi, 0x939, rfoverride4val_old); - write_phy_reg(pi, 0x43b, afectrlovr_old); - write_phy_reg(pi, 0x43c, afectrlovrval_old); - write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); + write_phy_reg(pi, 0x44c, RFOverrideVal0_old); + write_phy_reg(pi, 0x44d, RFOverrideVal0_old); + write_phy_reg(pi, 0x4b0, rfoverride2_old); + write_phy_reg(pi, 0x4b1, rfoverride2val_old); + write_phy_reg(pi, 0x4f9, rfoverride3_old); + write_phy_reg(pi, 0x4fa, rfoverride3val_old); + write_phy_reg(pi, 0x938, rfoverride4_old); + write_phy_reg(pi, 0x939, rfoverride4val_old); + write_phy_reg(pi, 0x43b, afectrlovr_old); + write_phy_reg(pi, 0x43c, afectrlovrval_old); + write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); + write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); - wlc_lcnphy_clear_trsw_override(pi); + wlc_lcnphy_clear_trsw_override(pi); - mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); + mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); - for (i = 0; i < 11; i++) - write_radio_reg(pi, rxiq_cal_rf_reg[i], - values_to_save[i]); + for (i = 0; i < 11; i++) + write_radio_reg(pi, rxiq_cal_rf_reg[i], + values_to_save[i]); - if (tx_gain_override_old) - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); - else - wlc_lcnphy_disable_tx_gain_override(pi); + if (tx_gain_override_old) + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); + else + wlc_lcnphy_disable_tx_gain_override(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); - wlc_lcnphy_rx_gain_override_enable(pi, false); + wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); + wlc_lcnphy_rx_gain_override_enable(pi, false); + } cal_done: kfree(ptr); @@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) write_radio_reg(pi, RADIO_2064_REG038, 3); write_radio_reg(pi, RADIO_2064_REG091, 7); } - - if (!(pi->sh->boardflags & BFL_FEM)) { - u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, - 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; - - write_radio_reg(pi, RADIO_2064_REG02A, 0xf); - write_radio_reg(pi, RADIO_2064_REG091, 0x3); - write_radio_reg(pi, RADIO_2064_REG038, 0x3); - - write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); - } } static int @@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); - mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); - mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); - mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); - mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); - mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); } } else { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); @@ -2130,14 +2061,12 @@ static void 
wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); - mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); } static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) { struct phytbl_info tab; u32 rfseq, ind; - u8 tssi_sel; tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; @@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); - if (pi->sh->boardflags & BFL_FEM) { - tssi_sel = 0x1; - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); - } else { - tssi_sel = 0xe; - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); - } + wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); @@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { - mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); + mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); } @@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); - mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); - wlc_lcnphy_pwrctrl_rssiparams(pi); } @@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) read_radio_reg(pi, RADIO_2064_REG007) & 1; u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; - u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); - idleTssi = read_phy_reg(pi, 0x4ab); suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); @@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); wlc_lcnphy_tssi_setup(pi); - - mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); - mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); - - wlc_lcnphy_set_bbmult(pi, 0x0); - wlc_phy_do_dummy_tx(pi, true, OFF); idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) >> 0); @@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); - wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); wlc_lcnphy_set_tx_gain(pi, &old_gains); wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); @@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) wlc_lcnphy_write_table(pi, &tab); tab.tbl_offset++; } - mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); - mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); - mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); - mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); - mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); @@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) target_gains.pad_gain = 21; target_gains.dac_gain = 0; wlc_lcnphy_set_tx_gain(pi, &target_gains); + wlc_lcnphy_set_tx_pwr_by_index(pi, 16); if (LCNREV_IS(pi->pubpi.phy_rev, 
1) || pi_lcn->lcnphy_hw_iqcal_en) { @@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) lcnphy_recal ? LCNPHY_CAL_RECAL : LCNPHY_CAL_FULL), false); } else { - wlc_lcnphy_set_tx_pwr_by_index(pi, 16); wlc_lcnphy_tx_iqlo_soft_cal_full(pi); } @@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, if (CHSPEC_IS5G(pi->radio_chanspec)) pa_gain = 0x70; else - pa_gain = 0x60; + pa_gain = 0x70; if (pi->sh->boardflags & BFL_FEM) pa_gain = 0x10; - tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_len = 1; tab.tbl_ptr = &val; for (j = 0; j < 128; j++) { - if (pi->sh->boardflags & BFL_FEM) - gm_gain = gain_table[j].gm; - else - gm_gain = 15; - + gm_gain = gain_table[j].gm; val = (((u32) pa_gain << 24) | (gain_table[j].pad << 16) | (gain_table[j].pga << 8) | gm_gain); @@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) write_phy_reg(pi, 0x4ea, 0x4688); - if (pi->sh->boardflags & BFL_FEM) - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); - else - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); @@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) wlc_lcnphy_rcal(pi); wlc_lcnphy_rc_cal(pi); - - if (!(pi->sh->boardflags & BFL_FEM)) { - write_radio_reg(pi, RADIO_2064_REG032, 0x6f); - write_radio_reg(pi, RADIO_2064_REG033, 0x19); - write_radio_reg(pi, RADIO_2064_REG039, 0xe); - } - } static void wlc_lcnphy_radio_init(struct brcms_phy *pi) @@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) wlc_lcnphy_write_table(pi, &tab); } - if (!(pi->sh->boardflags & BFL_FEM)) { - tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; - tab.tbl_width = 16; - tab.tbl_ptr = &val; - tab.tbl_len = 1; + tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; + tab.tbl_width = 16; + tab.tbl_ptr = &val; + tab.tbl_len = 1; - val = 150; - tab.tbl_offset = 0; - wlc_lcnphy_write_table(pi, &tab); + val = 114; + tab.tbl_offset = 0; + wlc_lcnphy_write_table(pi, &tab); - val = 220; - tab.tbl_offset = 1; - wlc_lcnphy_write_table(pi, &tab); - } + val = 130; + tab.tbl_offset = 1; + wlc_lcnphy_write_table(pi, &tab); + + val = 6; + tab.tbl_offset = 8; + wlc_lcnphy_write_table(pi, &tab); if (CHSPEC_IS2G(pi->radio_chanspec)) { if (pi->sh->boardflags & BFL_FEM) @@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) wlc_lcnphy_load_tx_iir_filter(pi, true, 3); mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); - wlc_lcnphy_tssi_setup(pi); } void wlc_phy_detach_lcnphy(struct brcms_phy *pi) @@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) return false; - if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { + if ((pi->sh->boardflags & BFL_FEM) && + (LCNREV_IS(pi->pubpi.phy_rev, 1))) { if (pi_lcn->lcnphy_tempsense_option == 3) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc2084..622c01ca72c5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { }; static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 
0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, - 0x0009, 0x000a, - 0x0005, - 0x0006, 0x0009, - 0x000a, - 0x0005, 0x0006, - 0x0009, - 0x000a, 0x0005, - 0x0006, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, + 0x000a, 0x0009, + 0x0006, + 0x0005, 0x000a, + 0x0009, + 0x0006, 0x0005, + 0x000a, + 0x0009, 0x0006, + 0x0005, }; static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index 3630a41df50d..c353b5f19c8c 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il, dma_addr_t txcmd_phys; int txq_id = skb_get_queue_mapping(skb); u16 len, idx, hdr_len; + u16 firstlen, secondlen; u8 id; u8 unicast; u8 sta_id; @@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il, len = sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len; - len = (len + 3) & ~3; + firstlen = (len + 3) & ~3; /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); + pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, + PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ - len = skb->len - hdr_len; - if (len) { + secondlen = skb->len - hdr_len; + if (secondlen > 0) { phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, len, + pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) goto drop_unlock; @@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il, /* Add buffer containing Tx command and MAC(!) 
header to TFD's * first entry */ - il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0); + il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, len); - if (len) - il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0, - U32_PAD(len)); + dma_unmap_len_set(out_meta, len, firstlen); + if (secondlen > 0) + il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, + U32_PAD(secondlen)); if (!ieee80211_has_morefrags(hdr->frame_control)) { txq->need_update = 1; diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bfe..6c7493c2d698 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, int rate_idx; int i; u32 rate; - u8 use_green = il4965_rs_use_green(il, sta); + u8 use_green; u8 active_tbl = 0; u8 valid_tx_ant; struct il_station_priv *sta_priv; @@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, if (!sta || !lq_sta) return; + use_green = il4965_rs_use_green(il, sta); sta_priv = (void *)sta->drv_priv; i = lq_sta->last_txrate_idx; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c3939..44ca0e57f9f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) } /* + * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag + * in iwl_down but cancel the workers only later. + */ + if (!priv->ucode_loaded) { + IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); + return -EIO; + } + + /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf162..a82b6b39d4ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); + if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { + /* + * If we go idle, then clearly no "passive-no-rx" + * workaround is needed any more, this is a reset. + */ + iwlagn_lift_passive_no_rx(priv); + } + if (unlikely(!iwl_is_ready(priv))) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); mutex_unlock(&priv->mutex); @@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, priv->timestamp = bss_conf->sync_tsf; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; } else { - /* - * If we disassociate while there are pending - * frames, just wake up the queues and let the - * frames "escape" ... This shouldn't really - * be happening to start with, but we should - * not get stuck in this case either since it - * can happen if userspace gets confused. 
- */ - iwlagn_lift_passive_no_rx(priv); - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; if (ctx->ctxid == IWL_RXON_CTX_BSS) diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index 94ef33838bc6..b775769f8322 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); if (!(flags & CMD_ASYNC)) { - cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; + cmd.flags |= CMD_WANT_SKB; might_sleep(); } diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d1a670d7b10c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, memset(&info->status, 0, sizeof(info->status)); if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && - iwl_is_associated_ctx(ctx) && ctx->vif && + ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { /* block and stop all queues */ priv->passive_no_rx = true; diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140e..1a4ac9236a44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return -EIO; } + priv->ucode_loaded = true; + if (ucode_type != IWL_UCODE_WOWLAN) { /* delay a bit to give rfkill time to run */ msleep(5); @@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return ret; } - priv->ucode_loaded = true; - return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 10f01793d7a6..81aa91fab5aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, __entry->flags = cmd->flags; memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { if (!cmd->len[i]) continue; memcpy((u8 *)__get_dynamic_array(hcmd) + offset, diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 6f228bb2b844..fbfd2d137117 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv) /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { - .amsdu_size_8K = 1, .restart_fw = 1, .plcp_check = true, .bt_coex_active = true, @@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index e5e3a79eae2f..2c2a729092f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -91,7 +91,7 @@ enum iwl_power_level { * @sw_crypto: using hardware encryption, default = 0 * 
@disable_11n: disable 11n capabilities, default = 0, * use IWL_DISABLE_HT_* constants - * @amsdu_size_8K: enable 8K amsdu size, default = 1 + * @amsdu_size_8K: enable 8K amsdu size, default = 0 * @restart_fw: restart firmware, default = 1 * @plcp_check: enable plcp health check, default = true * @wd_disable: enable stuck queue check, default = 0 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 8c7bec6b9a0b..0cac2b7af78b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -186,19 +186,13 @@ struct iwl_rx_packet { * @CMD_ASYNC: Return right away and don't want for the response * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the * response. The caller needs to call iwl_free_resp when done. - * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the - * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be - * copied. The pointer passed to the response handler is in the transport - * ownership and don't need to be freed by the op_mode. This also means - * that the pointer is invalidated after the op_mode's handler returns. * @CMD_ON_DEMAND: This command is sent by the test mode pipe. */ enum CMD_MODE { CMD_SYNC = 0, CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), - CMD_WANT_HCMD = BIT(2), - CMD_ON_DEMAND = BIT(3), + CMD_ON_DEMAND = BIT(2), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -217,7 +211,11 @@ struct iwl_device_cmd { #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) -#define IWL_MAX_CMD_TFDS 2 +/* + * number of transfer buffers (fragments) per transmit frame descriptor; + * this is just the driver's idea, the hardware supports 20 + */ +#define IWL_MAX_CMD_TBS_PER_TFD 2 /** * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command @@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag { * @id: id of the host command */ struct iwl_host_cmd { - const void *data[IWL_MAX_CMD_TFDS]; + const void *data[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_rx_packet *resp_pkt; unsigned long _rx_page_addr; u32 _rx_page_order; int handler_status; u32 flags; - u16 len[IWL_MAX_CMD_TFDS]; - u8 dataflags[IWL_MAX_CMD_TFDS]; + u16 len[IWL_MAX_CMD_TBS_PER_TFD]; + u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; u8 id; }; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 23eebda848b0..2adb61f103f4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -762,18 +762,20 @@ struct iwl_phy_context_cmd { #define IWL_RX_INFO_PHY_CNT 8 #define IWL_RX_INFO_AGC_IDX 1 #define IWL_RX_INFO_RSSI_AB_IDX 2 -#define IWL_RX_INFO_RSSI_C_IDX 3 -#define IWL_OFDM_AGC_DB_MSK 0xfe00 -#define IWL_OFDM_AGC_DB_POS 9 +#define IWL_OFDM_AGC_A_MSK 0x0000007f +#define IWL_OFDM_AGC_A_POS 0 +#define IWL_OFDM_AGC_B_MSK 0x00003f80 +#define IWL_OFDM_AGC_B_POS 7 +#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000 +#define IWL_OFDM_AGC_CODE_POS 20 #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff -#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 #define IWL_OFDM_RSSI_A_POS 0 +#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 +#define IWL_OFDM_RSSI_ALLBAND_A_POS 8 #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 -#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 #define IWL_OFDM_RSSI_B_POS 16 -#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff -#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 -#define IWL_OFDM_RSSI_C_POS 0 +#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 +#define IWL_OFDM_RSSI_ALLBAND_B_POS 24 /** * struct iwl_rx_phy_info - phy info diff 
--git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index d3d959db03a9..500f818dba04 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -79,17 +79,8 @@ #define UCODE_VALID_OK cpu_to_le32(0x1) /* Default calibration values for WkP - set to INIT image w/o running */ -static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f, - 0x00, 0x18, 0x00 }; -static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f }; -static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 }; -static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00, - 0x00 }; -static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 }; static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; -static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 }; struct iwl_calib_default_data { u16 size; @@ -99,12 +90,7 @@ struct iwl_calib_default_data { #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} static const struct iwl_calib_default_data wkp_calib_default_data[12] = { - [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc), - [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter), - [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo), - [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq), [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), - [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq), [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), }; @@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return 0; } -#define IWL_HW_REV_ID_RAINBOW 0x2 -#define IWL_PROJ_TYPE_LHP 0x5 - -static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm) -{ - struct iwl_nvm_data *data = mvm->nvm_data; - /* Temp calls to static definitions, will be changed to CSR calls */ - u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW; - u8 project_type = IWL_PROJ_TYPE_LHP; - - return data->radio_cfg_dash | (data->radio_cfg_step << 2) | - (hw_rev_id << 4) | ((project_type & 0x7f) << 6) | - (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20); -} static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { @@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) enum iwl_ucode_type ucode_type = mvm->cur_ucode; /* Set parameters */ - phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); + phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config); phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = @@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) sizeof(phy_cfg_cmd), &phy_cfg_cmd); } -/* Starting with the new PHY DB implementation - New calibs are enabled */ -/* Value - 0x405e7 */ -#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_BB_FILTER_IDX |\ - IWL_CALIB_CFG_LO_LEAKAGE_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_RX_IQ_IDX |\ - IWL_CALIB_CFG_AGC_IDX) - -#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0 - -/* Value 0x41567 */ -#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_BB_FILTER_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_RX_IQ_IDX |\ - IWL_CALIB_CFG_SENSITIVITY_IDX |\ - IWL_CALIB_CFG_AGC_IDX) - 
-#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_TX_PWR_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_SENSITIVITY_IDX) - -/* - * Sets the calibrations trigger values that will be sent to the FW for runtime - * and init calibrations. - * The ones given in the FW TLV are not correct. - */ -static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm) -{ - struct iwl_tlv_calib_ctrl default_calib; - - /* - * WkP FW TLV calib bits are wrong, overwrite them. - * This defines the dynamic calibrations which are implemented in the - * uCode both for init(flow) calculation and event driven calibs. - */ - - /* Init Image */ - default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT); - default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT); - - if (default_calib.event_trigger != - mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger) - IWL_ERR(mvm, - "Updating the event calib for INIT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger, - default_calib.event_trigger); - if (default_calib.flow_trigger != - mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger) - IWL_ERR(mvm, - "Updating the flow calib for INIT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger, - default_calib.flow_trigger); - - memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT], - &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); - IWL_ERR(mvm, - "Setting uCode init calibrations event 0x%x, trigger 0x%x\n", - default_calib.event_trigger, - default_calib.flow_trigger); - - /* Run time image */ - default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN); - default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN); - - if (default_calib.event_trigger != - mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger) - IWL_ERR(mvm, - "Updating the event calib for RT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger, - default_calib.event_trigger); - if (default_calib.flow_trigger != - mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger) - IWL_ERR(mvm, - "Updating the flow calib for RT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger, - default_calib.flow_trigger); - - memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR], - &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); - IWL_ERR(mvm, - "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n", - default_calib.event_trigger, - default_calib.flow_trigger); -} - static int iwl_set_default_calibrations(struct iwl_mvm *mvm) { u8 cmd_raw[16]; /* holds the variable size commands */ @@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); WARN_ON(ret); - /* Override the calibrations from TLV and the const of fw */ - iwl_set_default_calib_trigger(mvm); + /* Send TX valid antennas before triggering calibrations */ + ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); + if (ret) + goto error; /* WkP doesn't have all calibrations, need to set default values */ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 537711b10478..bdae700c769e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -80,7 +80,8 @@ #define IWL_INVALID_MAC80211_QUEUE 0xff 
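The mvm/fw.c change above drops the hand-built iwl_mvm_build_phy_cfg() and instead sends mvm->fw->phy_config exactly as the firmware TLVs provide it. For orientation, the removed helper packed the word as dash (bits 0-1), step (bits 2-3), hw rev (bits 4-5), project type (bits 6-12), valid TX antennas from bit 16 and valid RX antennas from bit 20. The decode helpers below are a sketch only, assuming 4-bit antenna fields; they are not part of the driver:

	static inline u32 phy_cfg_tx_ant(u32 phy_cfg)
	{
		return (phy_cfg >> 16) & 0xf;	/* valid TX antenna mask */
	}

	static inline u32 phy_cfg_rx_ant(u32 phy_cfg)
	{
		return (phy_cfg >> 20) & 0xf;	/* valid RX antenna mask */
	}
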
#define IWL_MVM_MAX_ADDRESSES 2 -#define IWL_RSSI_OFFSET 44 +/* RSSI offset for WkP */ +#define IWL_RSSI_OFFSET 50 enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index aa59adf87db3..d0f9c1e0475e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) ieee80211_free_txskb(mvm->hw, skb); } -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_dump_nic_error_log(mvm); - iwl_abort_notification_waits(&mvm->notif_wait); /* @@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) } } +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_dump_nic_error_log(mvm); + + iwl_mvm_nic_restart(mvm); +} + static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) { + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + WARN_ON(1); + iwl_mvm_nic_restart(mvm); } static const struct iwl_op_mode_ops iwl_mvm_ops = { diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 3f40ab05bbd8..b0b190d0ec23 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info) { - u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; + int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; + int rssi_all_band_a, rssi_all_band_b; + u32 agc_a, agc_b, max_agc; u32 val; - /* Find max rssi among 3 possible receivers. + /* Find max rssi among 2 possible receivers. * These values are measured by the Digital Signal Processor (DSP). * They should stay fairly constant even as the signal strength varies, * if the radio's Automatic Gain Control (AGC) is working right. * AGC value (see below) will provide the "interesting" info. */ + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); + agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; + agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; + max_agc = max_t(u32, agc_a, agc_b); + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; - val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); - rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; - - val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); - agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS; + rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >> + IWL_OFDM_RSSI_ALLBAND_A_POS; + rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >> + IWL_OFDM_RSSI_ALLBAND_B_POS; - max_rssi = max_t(u32, rssi_a, rssi_b); - max_rssi = max_t(u32, max_rssi, rssi_c); + /* + * dBm = rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
+ */ + rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a; + rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b; + max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm); - IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - rssi_a, rssi_b, rssi_c, max_rssi, agc_db); + IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n", + rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b); - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. */ - return max_rssi - agc_db - IWL_RSSI_OFFSET; + return max_rssi_dbm; } /* diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 861a7f9f8e7f..274f44e2ef60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 txq_id; int err; + + /* + * If mac80211 is cleaning its state, then say that we finished since + * our state has been cleared anyway. + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + return 0; + } + spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 6b67ce3f679c..6645efe5c03e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* Single frame failure in an AMPDU queue => send BAR */ if (txq_id >= IWL_FIRST_AMPDU_QUEUE && - !(info->flags & IEEE80211_TX_STAT_ACK)) { - /* there must be only one skb in the skb_list */ - WARN_ON_ONCE(skb_freed > 1 || - !skb_queue_empty(&skbs)); + !(info->flags & IEEE80211_TX_STAT_ACK)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - } /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 3d62e8055352..148843e7f34f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd) struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - - DEFINE_DMA_UNMAP_ADDR(mapping); - DEFINE_DMA_UNMAP_LEN(len); - u32 flags; }; @@ -185,25 +181,36 @@ struct iwl_queue { /* * The FH will write back to the first TB only, so we need * to copy some data into the buffer regardless of whether - * it should be mapped or not. This indicates how much to - * copy, even for HCMDs it must be big enough to fit the - * DRAM scratch from the TX cmd, at least 16 bytes. + * it should be mapped or not. This indicates how big the + * first TB must be to include the scratch buffer. Since + * the scratch is 4 bytes at offset 12, it's 16 now. If we + * make it bigger then allocations will be bigger and copy + * slower, so that's probably not useful. 
*/ -#define IWL_HCMD_MIN_COPY_SIZE 16 +#define IWL_HCMD_SCRATCHBUF_SIZE 16 struct iwl_pcie_txq_entry { struct iwl_device_cmd *cmd; - struct iwl_device_cmd *copy_cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; struct iwl_cmd_meta meta; }; +struct iwl_pcie_txq_scratch_buf { + struct iwl_cmd_header hdr; + u8 buf[8]; + __le32 scratch; +}; + /** * struct iwl_txq - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) + * @scratchbufs: start of command headers, including scratch buffers, for + * the writeback -- this is DMA memory and an array holding one buffer + * for each command on the queue + * @scratchbufs_dma: DMA address for the scratchbufs start * @entries: transmit entries (driver state) * @lock: queue lock * @stuck_timer: timer that fires if queue gets stuck @@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry { struct iwl_txq { struct iwl_queue q; struct iwl_tfd *tfds; + struct iwl_pcie_txq_scratch_buf *scratchbufs; + dma_addr_t scratchbufs_dma; struct iwl_pcie_txq_entry *entries; spinlock_t lock; struct timer_list stuck_timer; @@ -225,6 +234,13 @@ struct iwl_txq { u8 active; }; +static inline dma_addr_t +iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) +{ + return txq->scratchbufs_dma + + sizeof(struct iwl_pcie_txq_scratch_buf) * idx; +} + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index b0ae06d2456f..567e67ad1f61 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) { - struct iwl_pcie_txq_entry *ent; - ent = &txq->entries[cmd_index]; - cmd = ent->copy_cmd; - WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); - } else { + if (reclaim) + cmd = txq->entries[cmd_index].cmd; + else cmd = NULL; - } err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); if (reclaim) { - /* The original command isn't needed any more */ - kfree(txq->entries[cmd_index].copy_cmd); - txq->entries[cmd_index].copy_cmd = NULL; - /* nor is the duplicated part of the command */ kfree(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e753..12c4f31ca8fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); if (hw_rfkill && !run_in_rfkill) return -ERFKILL; @@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int err; @@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); 
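The rewritten iwl_mvm_calc_rssi() in mvm/rx.c above converts each chain's in-band RSSI and AGC reading into dBm as rssi - IWL_RSSI_OFFSET - agc (with the offset now 50 for WkP) and reports the stronger chain. A worked sketch with made-up register values; rssi_dbm() is a hypothetical helper, not driver code:

	/* dBm = in-band RSSI - offset - AGC gain; higher AGC (more radio
	 * gain) means a weaker signal. */
	static int rssi_dbm(int inband_rssi, int agc)
	{
		return inband_rssi - 50 /* IWL_RSSI_OFFSET */ - agc;
	}

	/* Example: chain A rssi=70, agc=60 gives 70 - 50 - 60 = -40 dBm;
	 * chain B rssi=65, agc=70 gives -55 dBm; the driver reports
	 * max(-40, -55) = -40 dBm. */
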
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); return 0; @@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, * op_mode. */ hw_rfkill = iwl_is_rfkill_set(trans); + if (hw_rfkill) + set_bit(STATUS_RFKILL, &trans_pcie->status); + else + clear_bit(STATUS_RFKILL, &trans_pcie->status); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); } } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8b625a7f5685..cb5c6792e3a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) } for (i = q->read_ptr; i != q->write_ptr; - i = iwl_queue_inc_wrap(i, q->n_bd)) { - struct iwl_tx_cmd *tx_cmd = - (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; + i = iwl_queue_inc_wrap(i, q->n_bd)) IWL_ERR(trans, "scratch %d = 0x%08x\n", i, - get_unaligned_le32(&tx_cmd->scratch)); - } + le32_to_cpu(txq->scratchbufs[i].scratch)); iwl_op_mode_nic_error(trans->op_mode); } @@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) } static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, - enum dma_data_direction dma_dir) + struct iwl_cmd_meta *meta, + struct iwl_tfd *tfd) { int i; int num_tbs; @@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, return; } - /* Unmap tx_cmd */ - if (num_tbs) - dma_unmap_single(trans->dev, - dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), - DMA_BIDIRECTIONAL); + /* first TB is never freed - it's the scratchbuf data */ - /* Unmap chunks, if any. */ for (i = 1; i < num_tbs; i++) dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); tfd->num_tbs = 0; } @@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - enum dma_data_direction dma_dir) +static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwl_tfd *tfd_tmp = txq->tfds; @@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, lockdep_assert_held(&txq->lock); /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], - dma_dir); + iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); /* free SKB */ if (txq->entries) { @@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; + size_t scratchbuf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) @@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); goto error; } + + BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); + BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch)); + + scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; + + txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, + &txq->scratchbufs_dma, + GFP_KERNEL); + if 
(!txq->scratchbufs) + goto err_free_tfds; + txq->q.id = txq_id; return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); error: if (txq->entries && txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) @@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = &trans_pcie->txq[txq_id]; struct iwl_queue *q = &txq->q; - enum dma_data_direction dma_dir; if (!q->n_bd) return; - /* In the command queue, all the TBs are mapped as BIDI - * so unmap them as such. - */ - if (txq_id == trans_pcie->cmd_queue) - dma_dir = DMA_BIDIRECTIONAL; - else - dma_dir = DMA_TO_DEVICE; - spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { - iwl_pcie_txq_free_tfd(trans, txq, dma_dir); + iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } spin_unlock_bh(&txq->lock); @@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); - kfree(txq->entries[i].copy_cmd); kfree(txq->entries[i].free_buf); } @@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) dma_free_coherent(dev, sizeof(struct iwl_tfd) * txq->q.n_bd, txq->tfds, txq->q.dma_addr); txq->q.dma_addr = 0; + + dma_free_coherent(dev, + sizeof(*txq->scratchbufs) * txq->q.n_window, + txq->scratchbufs, txq->scratchbufs_dma); } kfree(txq->entries); @@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); - iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); + iwl_pcie_txq_free_tfd(trans, txq); } iwl_pcie_txq_progress(trans_pcie, txq); @@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, void *dup_buf = NULL; dma_addr_t phys_addr; int idx; - u16 copy_size, cmd_size, dma_size; + u16 copy_size, cmd_size, scratch_size; bool had_nocopy = false; int i; u32 cmd_pos; - const u8 *cmddata[IWL_MAX_CMD_TFDS]; - u16 cmdlen[IWL_MAX_CMD_TFDS]; + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; copy_size = sizeof(out_cmd->hdr); cmd_size = sizeof(out_cmd->hdr); /* need one for the header if the first is NOCOPY */ - BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); + BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { cmddata[i] = cmd->data[i]; cmdlen[i] = cmd->len[i]; if (!cmd->len[i]) continue; - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; + /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; @@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* and copy the data that needs to be copied */ cmd_pos = offsetof(struct iwl_device_cmd, payload); copy_size = sizeof(out_cmd->hdr); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy = 0; - if (!cmd->len) + if (!cmd->len[i]) continue; - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; + /* need at least 
IWL_HCMD_SCRATCHBUF_SIZE copied */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; @@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } } - WARN_ON_ONCE(txq->entries[idx].copy_cmd); - - /* - * since out_cmd will be the source address of the FH, it will write - * the retry count there. So when the user needs to receivce the HCMD - * that corresponds to the response in the response handler, it needs - * to set CMD_WANT_HCMD. - */ - if (cmd->flags & CMD_WANT_HCMD) { - txq->entries[idx].copy_cmd = - kmemdup(out_cmd, cmd_pos, GFP_ATOMIC); - if (unlikely(!txq->entries[idx].copy_cmd)) { - idx = -ENOMEM; - goto out; - } - } - IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); - /* - * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must - * still map at least that many bytes for the hardware to write back to. - * We have enough space, so that's not a problem. - */ - dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); + /* start the TFD with the scratchbuf */ + scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); + memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + iwl_pcie_txq_build_tfd(trans, txq, + iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), + scratch_size, 1); + + /* map first command fragment, if any remains */ + if (copy_size > scratch_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + scratch_size, + copy_size - scratch_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + idx = -ENOMEM; + goto out; + } - phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { - idx = -ENOMEM; - goto out; + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, + copy_size - scratch_size, 0); } - dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, dma_size); - - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); - /* map the remaining (adjusted) nocopy/dup fragments */ - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { const void *data = cmddata[i]; if (!cmdlen[i]) @@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, (void *)data, - cmdlen[i], DMA_BIDIRECTIONAL); + cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr], - DMA_BIDIRECTIONAL); + &txq->tfds[q->write_ptr]); idx = -ENOMEM; goto out; } @@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; - iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); + iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); /* Input error checking is done when commands are added to queue. 
*/ if (meta->flags & CMD_WANT_SKB) { @@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; struct iwl_queue *q; - dma_addr_t phys_addr = 0; - dma_addr_t txcmd_phys; - dma_addr_t scratch_phys; - u16 len, firstlen, secondlen; + dma_addr_t tb0_phys, tb1_phys, scratch_phys; + void *tb1_addr; + u16 len, tb1_len, tb2_len; u8 wait_write_ptr = 0; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); @@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); + tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); + scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) */ - len = sizeof(struct iwl_tx_cmd) + - sizeof(struct iwl_cmd_header) + hdr_len; - firstlen = (len + 3) & ~3; + len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; + tb1_len = (len + 3) & ~3; /* Tell NIC about any 2-byte padding after MAC header */ - if (firstlen != len) + if (tb1_len != len) tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = dma_map_single(trans->dev, - &dev_cmd->hdr, firstlen, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) - goto out_err; - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); + /* The first TB points to the scratchbuf data - min_copy bytes */ + memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, + IWL_HCMD_SCRATCHBUF_SIZE); + iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, + IWL_HCMD_SCRATCHBUF_SIZE, 1); - if (!ieee80211_has_morefrags(fc)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). 
*/ - secondlen = skb->len - hdr_len; - if (secondlen > 0) { - phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, - secondlen, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { - dma_unmap_single(trans->dev, - dma_unmap_addr(out_meta, mapping), - dma_unmap_len(out_meta, len), - DMA_BIDIRECTIONAL); + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; + tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) + goto out_err; + iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); + + /* + * Set up TFD's third entry to point directly to remainder + * of skb, if any (802.11 null frames have no payload). + */ + tb2_len = skb->len - hdr_len; + if (tb2_len > 0) { + dma_addr_t tb2_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); goto out_err; } + iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0); } - /* Attach buffers to TFD */ - iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1); - if (secondlen > 0) - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0); - - scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); - - /* take back ownership of DMA buffer to enable update */ - dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, - DMA_BIDIRECTIONAL); - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); - dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, - DMA_BIDIRECTIONAL); - trace_iwlwifi_dev_tx(trans->dev, skb, &txq->tfds[txq->q.write_ptr], sizeof(struct iwl_tfd), - &dev_cmd->hdr, firstlen, - skb->data + hdr_len, secondlen); + &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); trace_iwlwifi_dev_tx_data(trans->dev, skb, - skb->data + hdr_len, secondlen); + skb->data + hdr_len, tb2_len); + + if (!ieee80211_has_morefrags(fc)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } /* start timer if queue currently empty */ if (txq->need_update && q->read_ptr == q->write_ptr && diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd57..8aaf56ade4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, } } - for (i = 0; i < request->n_channels; i++) { + for (i = 0; i < min_t(u32, request->n_channels, + MWIFIEX_USER_SCAN_CHAN_MAX); i++) { chan = request->channels[i]; priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; priv->user_scan_cfg->chan_list[i].radio_type = chan->band; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 20a6c5555873..b5c8b962ce12 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, return -1; } + cmd_code = le16_to_cpu(host_cmd->command); + cmd_size = le16_to_cpu(host_cmd->size); + + if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && + cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && + cmd_code != 
HostCmd_CMD_FUNC_INIT) { + dev_err(adapter->dev, + "DNLD_CMD: FW in reset state, ignore cmd %#x\n", + cmd_code); + mwifiex_complete_cmd(adapter, cmd_node); + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + return -1; + } + /* Set command sequence number */ adapter->seq_num++; host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO @@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, adapter->curr_cmd = cmd_node; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - cmd_code = le16_to_cpu(host_cmd->command); - cmd_size = le16_to_cpu(host_cmd->size); - /* Adjust skb length */ if (cmd_node->cmd_skb->len > cmd_size) /* @@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid, data_buf); - if (!ret) - ret = mwifiex_wait_queue_complete(adapter); return ret; } @@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, if (cmd_no == HostCmd_CMD_802_11_SCAN) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { - adapter->cmd_queued = cmd_node; mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); + if (cmd_node->wait_q_enabled) + ret = mwifiex_wait_queue_complete(adapter, cmd_node); } return ret; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index e38aa9b3663d..0ff4c37ab42a 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) return ret; } + /* cancel current command */ + if (adapter->curr_cmd) { + dev_warn(adapter->dev, "curr_cmd is still in processing\n"); + del_timer(&adapter->cmd_timer); + mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); + adapter->curr_cmd = NULL; + } + /* shut down mwifiex */ dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 246aa62a4817..2fe0ceba4400 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, adhoc_join->bss_descriptor.bssid, adhoc_join->bss_descriptor.ssid); - for (i = 0; bss_desc->supported_rates[i] && - i < MWIFIEX_SUPPORTED_RATES; - i++) - ; + for (i = 0; i < MWIFIEX_SUPPORTED_RATES && + bss_desc->supported_rates[i]; i++) + ; rates_size = i; /* Copy Data Rates from the Rates recorded in scan response */ diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 553adfb0aa81..7035ade9af74 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -723,7 +723,6 @@ struct mwifiex_adapter { u16 cmd_wait_q_required; struct mwifiex_wait_queue cmd_wait_q; u8 scan_wait_q_woken; - struct cmd_ctrl_node *cmd_queued; spinlock_t queue_lock; /* lock for tx queues */ struct completion fw_load; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; @@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, struct mwifiex_multicast_list *mcast_list); int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, struct net_device *dev); -int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter); +int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, + struct cmd_ctrl_node *cmd_queued); int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss 
*bss, struct cfg80211_ssid *req_ssid); int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2b..feb204613397 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); + skb_push(skb, INTF_HEADER_LEN); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, PCI_DMA_FROMDEVICE)) return -1; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index bb60c2754a97..e7f6deaf715e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1388,10 +1388,15 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - adapter->cmd_queued = cmd_node; mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); + + /* Perform internal scan synchronously */ + if (!priv->scan_request) { + dev_dbg(adapter->dev, "wait internal scan\n"); + mwifiex_wait_queue_complete(adapter, cmd_node); + } } else { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1790,7 +1795,12 @@ check_next_scan: /* Need to indicate IOCTL complete */ if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; - mwifiex_complete_cmd(adapter, adapter->curr_cmd); + if (!priv->scan_request) { + dev_dbg(adapter->dev, + "complete internal scan\n"); + mwifiex_complete_cmd(adapter, + adapter->curr_cmd); + } } if (priv->report_scan_result) priv->report_scan_result = false; @@ -1946,9 +1956,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv, /* Normal scan */ ret = mwifiex_scan_networks(priv, NULL); - if (!ret) - ret = mwifiex_wait_queue_complete(priv->adapter); - up(&priv->async_sem); return ret; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 9f33c92c90f5..13100f8de3db 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, * This function waits on a cmd wait queue. It also cancels the pending * request after waking up, in case of errors. 
*/ -int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) +int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, + struct cmd_ctrl_node *cmd_queued) { int status; - struct cmd_ctrl_node *cmd_queued; - - if (!adapter->cmd_queued) - return 0; - - cmd_queued = adapter->cmd_queued; - adapter->cmd_queued = NULL; dev_dbg(adapter->dev, "cmd pending\n"); atomic_inc(&adapter->cmd_pending); diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 44d6ead43341..76cd47eb901e 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -20,6 +20,7 @@ if RT2X00 config RT2400PCI tristate "Ralink rt2400 (PCI/PCMCIA) support" depends on PCI + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI select EEPROM_93CX6 ---help--- @@ -31,6 +32,7 @@ config RT2400PCI config RT2500PCI tristate "Ralink rt2500 (PCI/PCMCIA) support" depends on PCI + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI select EEPROM_93CX6 ---help--- @@ -43,6 +45,7 @@ config RT61PCI tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" depends on PCI select RT2X00_LIB_PCI + select RT2X00_LIB_MMIO select RT2X00_LIB_FIRMWARE select RT2X00_LIB_CRYPTO select CRC_ITU_T @@ -55,10 +58,11 @@ config RT61PCI config RT2800PCI tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" - depends on PCI || RALINK_RT288X || RALINK_RT305X + depends on PCI || SOC_RT288X || SOC_RT305X select RT2800_LIB + select RT2X00_LIB_MMIO select RT2X00_LIB_PCI if PCI - select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X + select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X select RT2X00_LIB_FIRMWARE select RT2X00_LIB_CRYPTO select CRC_CCITT @@ -185,6 +189,9 @@ endif config RT2800_LIB tristate +config RT2X00_LIB_MMIO + tristate + config RT2X00_LIB_PCI tristate select RT2X00_LIB diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a4..f069d8bc5b67 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile @@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f1..dcfb54e0c516 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2400pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 39edc59e8d03..e1d2dc9ed28a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -34,6 +34,7 @@ #include <linux/slab.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2500pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 48a01aa21f1c..ba5a05625aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -41,6 +41,7 @@ #include <linux/eeprom_93cx6.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2x00soc.h" #include "rt2800lib.h" @@ -89,7 +90,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev 
*rt2x00dev, const u8 token) rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); } -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); @@ -107,7 +108,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) { return -ENOMEM; } -#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ +#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ #ifdef CONFIG_PCI static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) @@ -1177,7 +1178,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); #endif /* CONFIG_PCI */ MODULE_LICENSE("GPL"); -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) static int rt2800soc_probe(struct platform_device *pdev) { return rt2x00soc_probe(pdev, &rt2800pci_ops); @@ -1194,7 +1195,7 @@ static struct platform_driver rt2800soc_driver = { .suspend = rt2x00soc_suspend, .resume = rt2x00soc_resume, }; -#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ +#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ #ifdef CONFIG_PCI static int rt2800pci_probe(struct pci_dev *pci_dev, @@ -1217,7 +1218,7 @@ static int __init rt2800pci_init(void) { int ret = 0; -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) ret = platform_driver_register(&rt2800soc_driver); if (ret) return ret; @@ -1225,7 +1226,7 @@ static int __init rt2800pci_init(void) #ifdef CONFIG_PCI ret = pci_register_driver(&rt2800pci_driver); if (ret) { -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) platform_driver_unregister(&rt2800soc_driver); #endif return ret; @@ -1240,7 +1241,7 @@ static void __exit rt2800pci_exit(void) #ifdef CONFIG_PCI pci_unregister_driver(&rt2800pci_driver); #endif -#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) +#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) platform_driver_unregister(&rt2800soc_driver); #endif } diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 000000000000..d84a680ba0c9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c @@ -0,0 +1,216 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mmio + Abstract: rt2x00 generic mmio device routines. 
+ */ + +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "rt2x00.h" +#include "rt2x00mmio.h" + +/* + * Register access. + */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg) +{ + unsigned int i; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, offset, reg); + if (!rt2x00_get_field32(*reg, field)) + return 1; + udelay(REGISTER_BUSY_DELAY); + } + + printk_once(KERN_ERR "%s() Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); + *reg = ~0; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); + +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue = rt2x00dev->rx; + struct queue_entry *entry; + struct queue_entry_priv_pci *entry_priv; + struct skb_frame_desc *skbdesc; + int max_rx = 16; + + while (--max_rx) { + entry = rt2x00queue_get_entry(queue, Q_INDEX); + entry_priv = entry->priv_data; + + if (rt2x00dev->ops->lib->get_entry_state(entry)) + break; + + /* + * Fill in desc fields of the skb descriptor + */ + skbdesc = get_skb_frame_desc(entry->skb); + skbdesc->desc = entry_priv->desc; + skbdesc->desc_len = entry->queue->desc_size; + + /* + * DMA is already done, notify rt2x00lib that + * it finished successfully. + */ + rt2x00lib_dmastart(entry); + rt2x00lib_dmadone(entry); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, GFP_ATOMIC); + } + + return !max_rx; +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) +{ + unsigned int i; + + for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) + msleep(10); +} +EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); + +/* + * Device initialization handlers. + */ +static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv; + void *addr; + dma_addr_t dma; + unsigned int i; + + /* + * Allocate DMA memory for descriptor and buffer. + */ + addr = dma_alloc_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + &dma, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + memset(addr, 0, queue->limit * queue->desc_size); + + /* + * Initialize all queue entries to contain valid addresses. + */ + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + entry_priv->desc = addr + i * queue->desc_size; + entry_priv->desc_dma = dma + i * queue->desc_size; + } + + return 0; +} + +static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv = + queue->entries[0].priv_data; + + if (entry_priv->desc) + dma_free_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + entry_priv->desc, entry_priv->desc_dma); + entry_priv->desc = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + int status; + + /* + * Allocate DMA + */ + queue_for_each(rt2x00dev, queue) { + status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); + if (status) + goto exit; + } + + /* + * Register interrupt handler. 
+ */ + status = request_irq(rt2x00dev->irq, + rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, rt2x00dev->name, rt2x00dev); + if (status) { + ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", + rt2x00dev->irq, status); + goto exit; + } + + return 0; + +exit: + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + /* + * Free irq line. + */ + free_irq(rt2x00dev->irq, rt2x00dev); + + /* + * Free DMA + */ + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * rt2x00mmio module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 mmio library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 000000000000..4ecaf60175bf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h @@ -0,0 +1,119 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mmio + Abstract: Data structures for the rt2x00mmio module. + */ + +#ifndef RT2X00MMIO_H +#define RT2X00MMIO_H + +#include <linux/io.h> + +/* + * Register access. + */ +static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + *value = readl(rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + memcpy_fromio(value, rt2x00dev->csr.base + offset, length); +} + +static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + writel(value, rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, + const u32 length) +{ + __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); +} + +/** + * rt2x00pci_regbusy_read - Read from register with busy check + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @field: Field to check if register is busy + * @reg: Pointer to where register contents should be stored + * + * This function will read the given register, and checks if the + * register is busy. If it is, it will sleep for a couple of + * microseconds before reading the register again. If the register + * is not read after a certain timeout, this function will return + * FALSE. 
+ */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg); + +/** + * struct queue_entry_priv_pci: Per entry PCI specific information + * + * @desc: Pointer to device descriptor + * @desc_dma: DMA pointer to &desc. + * @data: Pointer to device's entry memory. + * @data_dma: DMA pointer to &data. + */ +struct queue_entry_priv_pci { + __le32 *desc; + dma_addr_t desc_dma; +}; + +/** + * rt2x00pci_rxdone - Handle RX done events + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * + * Returns true if there are still rx frames pending and false if all + * pending rx frames were processed. + */ +bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00pci_flush_queue - Flush data queue + * @queue: Data queue to stop + * @drop: True to drop all pending frames. + * + * This will wait for a maximum of 100ms, waiting for the queues + * to become empty. + */ +void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +#endif /* RT2X00MMIO_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0a..e87865e33113 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -33,182 +33,6 @@ #include "rt2x00pci.h" /* - * Register access. - */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const struct rt2x00_field32 field, - u32 *reg) -{ - unsigned int i; - - if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - return 0; - - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00pci_register_read(rt2x00dev, offset, reg); - if (!rt2x00_get_field32(*reg, field)) - return 1; - udelay(REGISTER_BUSY_DELAY); - } - - ERROR(rt2x00dev, "Indirect register access failed: " - "offset=0x%.08x, value=0x%.08x\n", offset, *reg); - *reg = ~0; - - return 0; -} -EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); - -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue = rt2x00dev->rx; - struct queue_entry *entry; - struct queue_entry_priv_pci *entry_priv; - struct skb_frame_desc *skbdesc; - int max_rx = 16; - - while (--max_rx) { - entry = rt2x00queue_get_entry(queue, Q_INDEX); - entry_priv = entry->priv_data; - - if (rt2x00dev->ops->lib->get_entry_state(entry)) - break; - - /* - * Fill in desc fields of the skb descriptor - */ - skbdesc = get_skb_frame_desc(entry->skb); - skbdesc->desc = entry_priv->desc; - skbdesc->desc_len = entry->queue->desc_size; - - /* - * DMA is already done, notify rt2x00lib that - * it finished successfully. - */ - rt2x00lib_dmastart(entry); - rt2x00lib_dmadone(entry); - - /* - * Send the frame to rt2x00lib for further processing. - */ - rt2x00lib_rxdone(entry, GFP_ATOMIC); - } - - return !max_rx; -} -EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); - -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) -{ - unsigned int i; - - for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) - msleep(10); -} -EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); - -/* - * Device initialization handlers. - */ -static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, - struct data_queue *queue) -{ - struct queue_entry_priv_pci *entry_priv; - void *addr; - dma_addr_t dma; - unsigned int i; - - /* - * Allocate DMA memory for descriptor and buffer. 
- */ - addr = dma_alloc_coherent(rt2x00dev->dev, - queue->limit * queue->desc_size, - &dma, GFP_KERNEL); - if (!addr) - return -ENOMEM; - - memset(addr, 0, queue->limit * queue->desc_size); - - /* - * Initialize all queue entries to contain valid addresses. - */ - for (i = 0; i < queue->limit; i++) { - entry_priv = queue->entries[i].priv_data; - entry_priv->desc = addr + i * queue->desc_size; - entry_priv->desc_dma = dma + i * queue->desc_size; - } - - return 0; -} - -static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, - struct data_queue *queue) -{ - struct queue_entry_priv_pci *entry_priv = - queue->entries[0].priv_data; - - if (entry_priv->desc) - dma_free_coherent(rt2x00dev->dev, - queue->limit * queue->desc_size, - entry_priv->desc, entry_priv->desc_dma); - entry_priv->desc = NULL; -} - -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - int status; - - /* - * Allocate DMA - */ - queue_for_each(rt2x00dev, queue) { - status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); - if (status) - goto exit; - } - - /* - * Register interrupt handler. - */ - status = request_irq(rt2x00dev->irq, - rt2x00dev->ops->lib->irq_handler, - IRQF_SHARED, rt2x00dev->name, rt2x00dev); - if (status) { - ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", - rt2x00dev->irq, status); - goto exit; - } - - return 0; - -exit: - queue_for_each(rt2x00dev, queue) - rt2x00pci_free_queue_dma(rt2x00dev, queue); - - return status; -} -EXPORT_SYMBOL_GPL(rt2x00pci_initialize); - -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) -{ - struct data_queue *queue; - - /* - * Free irq line. - */ - free_irq(rt2x00dev->irq, rt2x00dev); - - /* - * Free DMA - */ - queue_for_each(rt2x00dev, queue) - rt2x00pci_free_queue_dma(rt2x00dev, queue); -} -EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); - -/* * PCI driver handlers. */ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a14..60d90b20f8b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -36,94 +36,6 @@ #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) /* - * Register access. - */ -static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - u32 *value) -{ - *value = readl(rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - void *value, const u32 length) -{ - memcpy_fromio(value, rt2x00dev->csr.base + offset, length); -} - -static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - u32 value) -{ - writel(value, rt2x00dev->csr.base + offset); -} - -static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const void *value, - const u32 length) -{ - __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); -} - -/** - * rt2x00pci_regbusy_read - Read from register with busy check - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * @offset: Register offset - * @field: Field to check if register is busy - * @reg: Pointer to where register contents should be stored - * - * This function will read the given register, and checks if the - * register is busy. If it is, it will sleep for a couple of - * microseconds before reading the register again. 
If the register - * is not read after a certain timeout, this function will return - * FALSE. - */ -int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - const struct rt2x00_field32 field, - u32 *reg); - -/** - * struct queue_entry_priv_pci: Per entry PCI specific information - * - * @desc: Pointer to device descriptor - * @desc_dma: DMA pointer to &desc. - * @data: Pointer to device's entry memory. - * @data_dma: DMA pointer to &data. - */ -struct queue_entry_priv_pci { - __le32 *desc; - dma_addr_t desc_dma; -}; - -/** - * rt2x00pci_rxdone - Handle RX done events - * @rt2x00dev: Device pointer, see &struct rt2x00_dev. - * - * Returns true if there are still rx frames pending and false if all - * pending rx frames were processed. - */ -bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); - -/** - * rt2x00pci_flush_queue - Flush data queue - * @queue: Data queue to stop - * @drop: True to drop all pending frames. - * - * This will wait for a maximum of 100ms, waiting for the queues - * to become empty. - */ -void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); - -/* - * Device initialization handlers. - */ -int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); -void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); - -/* * PCI driver handlers. */ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf89..9e3c8ff53e3f 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -35,6 +35,7 @@ #include <linux/eeprom_93cx6.h> #include "rt2x00.h" +#include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt61pci.h" diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index b1ccff474c79..c08d0f4c5f3d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw) void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) { - /* dummy routine needed for callback from rtl_op_configure_filter() */ -} - -/*========================================================================== */ - -static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, - enum nl80211_iftype type) -{ struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 filterout_non_associated_bssid = false; + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); - switch (type) { - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_STATION: - filterout_non_associated_bssid = true; - break; - case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_AP: - default: - break; - } - if (filterout_non_associated_bssid) { + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid) { + u8 tmp; if (IS_NORMAL_CHIP(rtlhal->version)) { - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *)(&reg_rcr)); - /* enable update TSF */ - _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *)(&reg_rcr)); - /* disable update TSF */ - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); - break; - } } else { - reg_rcr |= (RCR_CBSSID); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); + reg_rcr
|= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + tmp = BIT(4); } else { - reg_rcr |= (RCR_CBSSID); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); + reg_rcr |= RCR_CBSSID; + tmp = BIT(4) | BIT(5); } - } else if (filterout_non_associated_bssid == false) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *) (&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); + } else { + u8 tmp; if (IS_NORMAL_CHIP(rtlhal->version)) { - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); + tmp = BIT(4); } else { - reg_rcr &= (~RCR_CBSSID); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *)(&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); + reg_rcr &= ~RCR_CBSSID; + tmp = BIT(4) | BIT(5); } + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *) (&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); } } +/*========================================================================== */ + int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (_rtl92cu_set_media_status(hw, type)) return -EOPNOTSUPP; - _rtl92cu_set_check_bssid(hw, type); + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP) + rtl92cu_set_check_bssid(hw, true); + } else { + rtl92cu_set_check_bssid(hw, false); + } + return 0; } @@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, (shortgi_rate << 4) | (shortgi_rate); } rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", - rtl_read_dword(rtlpriv, REG_ARFR0)); } void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 156b52732f3d..5847d6d0881e 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, if (unlikely(!_urb)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't allocate urb. 
Drop skb!\n"); + kfree_skb(skb); return; } _rtl_submit_tx_urb(hw, _urb); diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812e..ca33ae193935 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -22,7 +22,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/gpio.h> -#include <linux/mei_bus.h> +#include <linux/mei_cl_bus.h> #include <linux/nfc.h> #include <net/nfc/hci.h> @@ -32,9 +32,6 @@ #define MICROREAD_DRIVER_NAME "microread" -#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ - 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) - struct mei_nfc_hdr { u8 cmd; u8 status; @@ -48,7 +45,7 @@ struct mei_nfc_hdr { #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) struct microread_mei_phy { - struct mei_device *mei_device; + struct mei_cl_device *device; struct nfc_hci_dev *hdev; int powered; @@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb) MEI_DUMP_SKB_OUT("mei frame sent", skb); - r = mei_send(phy->device, skb->data, skb->len); + r = mei_cl_send(phy->device, skb->data, skb->len); if (r > 0) r = 0; return r; } -static void microread_event_cb(struct mei_device *device, u32 events, +static void microread_event_cb(struct mei_cl_device *device, u32 events, void *context) { struct microread_mei_phy *phy = context; @@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, if (phy->hard_fault != 0) return; - if (events & BIT(MEI_EVENT_RX)) { + if (events & BIT(MEI_CL_EVENT_RX)) { struct sk_buff *skb; int reply_size; @@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, if (!skb) return; - reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); + reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); if (reply_size < MEI_NFC_HEADER_SIZE) { kfree(skb); return; @@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = { .disable = microread_mei_disable, }; -static int microread_mei_probe(struct mei_device *device, - const struct mei_id *id) +static int microread_mei_probe(struct mei_cl_device *device, + const struct mei_cl_device_id *id) { struct microread_mei_phy *phy; int r; @@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device, } phy->device = device; - mei_set_clientdata(device, phy); + mei_cl_set_drvdata(device, phy); - r = mei_register_event_cb(device, microread_event_cb, phy); + r = mei_cl_register_event_cb(device, microread_event_cb, phy); if (r) { pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); goto err_out; @@ -186,9 +183,9 @@ err_out: return r; } -static int microread_mei_remove(struct mei_device *device) +static int microread_mei_remove(struct mei_cl_device *device) { - struct microread_mei_phy *phy = mei_get_clientdata(device); + struct microread_mei_phy *phy = mei_cl_get_drvdata(device); pr_info("Removing microread\n"); @@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device) return 0; } -static struct mei_id microread_mei_tbl[] = { - { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, +static struct mei_cl_device_id microread_mei_tbl[] = { + { MICROREAD_DRIVER_NAME }, /* required last entry */ { } }; - MODULE_DEVICE_TABLE(mei, microread_mei_tbl); -static struct mei_driver microread_driver = { +static struct mei_cl_driver microread_driver = { .id_table = microread_mei_tbl, .name = MICROREAD_DRIVER_NAME, @@ -225,7 +221,7 @@ static int microread_mei_init(void) pr_debug(DRIVER_DESC ": %s\n", __func__); - r = 
mei_driver_register(&microread_driver); + r = mei_cl_driver_register(&microread_driver); if (r) { pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); return r; @@ -236,7 +232,7 @@ static int microread_mei_init(void) static void microread_mei_exit(void) { - mei_driver_unregister(&microread_driver); + mei_cl_driver_unregister(&microread_driver); } module_init(microread_mei_init); diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 7db249a25016..46a7b738f61f 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -16,6 +16,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/init.h> +#include <asm/pci_debug.h> #include <asm/sclp.h> #define SLOT_NAME_SIZE 10 @@ -49,6 +50,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) return -EIO; rc = sclp_pci_configure(slot->zdev->fid); + zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, rc); if (!rc) { slot->zdev->state = ZPCI_FN_STATE_CONFIGURED; /* automatically scan the device after is was configured */ @@ -66,16 +68,16 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) if (!zpci_fn_configured(slot->zdev->state)) return -EIO; + rc = zpci_disable_device(slot->zdev); + if (rc) + return rc; /* TODO: we rely on the user to unbind/remove the device, is that plausible * or do we need to trigger that here? */ rc = sclp_pci_deconfigure(slot->zdev->fid); - if (!rc) { - /* Fixme: better call List-PCI to find the disabled FH - for the FID since the FH should be opaque... */ - slot->zdev->fh &= 0x7fffffff; + zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, rc); + if (!rc) slot->zdev->state = ZPCI_FN_STATE_STANDBY; - } return rc; } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index d927933dcf44..e4b1fb2c0f5d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) return; } - if (!pci_dev->pm_cap || !pci_dev->pme_support - || pci_check_pme_status(pci_dev)) { - if (pci_dev->pme_poll) - pci_dev->pme_poll = false; + /* Clear PME Status if set. */ + if (pci_dev->pme_support) + pci_check_pme_status(pci_dev); - pci_wakeup_event(pci_dev); - pm_runtime_resume(&pci_dev->dev); - } + if (pci_dev->pme_poll) + pci_dev->pme_poll = false; + + pci_wakeup_event(pci_dev); + pm_runtime_resume(&pci_dev->dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a999..79277fb36c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev) /* * Turn off Bus Master bit on the device to tell it to not - * continue to do DMA + * continue to do DMA. Don't touch devices in D3cold or unknown states. */ - pci_clear_master(pci_dev); + if (pci_dev->current_state <= PCI_D3hot) + pci_clear_master(pci_dev); } #ifdef CONFIG_PM diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 0efba0539cc9..696caed5fdf5 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { #endif /* !PM */ /* - * PCIe port runtime suspend is broken for some chipsets, so use a - * black list to disable runtime PM for these chipsets. 
- */ -static const struct pci_device_id port_runtime_pm_black_list[] = { - { /* end: all zeroes */ } -}; - -/* * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed * @@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev, * it by default. */ dev->d3cold_allowed = false; - if (!pci_match_id(port_runtime_pm_black_list, dev)) - pm_runtime_put_noidle(&dev->dev); - return 0; } static void pcie_portdrv_remove(struct pci_dev *dev) { - if (!pci_match_id(port_runtime_pm_black_list, dev)) - pm_runtime_get_noresume(&dev->dev); pcie_port_device_remove(dev); pci_disable_device(dev); } diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index ab886b7ee327..c5d0a08a8747 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -118,17 +118,11 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) void __iomem *rom; /* - * Some devices may provide ROMs via a source other than the BAR - */ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt(pdev->rom); - /* * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy * memory map if the VGA enable bit of the Bridge Control register is * set for embedded VGA. */ - } else if (res->flags & IORESOURCE_ROM_SHADOW) { + if (res->flags & IORESOURCE_ROM_SHADOW) { /* primary video rom always starts here */ start = (loff_t)0xC0000; *size = 0x20000; /* cover C000:0 through E000:0 */ @@ -187,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) return; - if (!pdev->rom || !pdev->romlen) - iounmap(rom); + iounmap(rom); /* Disable again before continuing, leave enabled if pci=rom */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) @@ -212,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev) } } +/** + * pci_platform_rom - provides a pointer to any ROM image provided by the + * platform + * @pdev: pointer to pci device struct + * @size: pointer to receive size of pci window over ROM + */ +void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) +{ + if (pdev->rom && pdev->romlen) { + *size = pdev->romlen; + return phys_to_virt((phys_addr_t)pdev->rom); + } + + return NULL; +} + EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); EXPORT_SYMBOL_GPL(pci_enable_rom); EXPORT_SYMBOL_GPL(pci_disable_rom); +EXPORT_SYMBOL(pci_platform_rom); diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c index c689c04a4f52..2d2f0a43d36b 100644 --- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c +++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c @@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev) /* special soc specific control */ if (ctrl->mpp_get || ctrl->mpp_set) { - if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) { + if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) { dev_err(&pdev->dev, "wrong soc control info\n"); return -EINVAL; } diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index ac8d382a79bb..d611ecfcbf70 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c @@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = { static int pinconf_dbg_state_print(struct seq_file *s, void *d) { if (strlen(dbg_state_name)) - seq_printf(s, "%s\n", dbg_pinname); + seq_printf(s, "%s\n", dbg_state_name); else seq_printf(s, "No pin state set\n"); return 0; diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h index e3ed8cb072a5..bfda73d64eed 100644 
--- a/drivers/pinctrl/pinconf.h +++ b/drivers/pinctrl/pinconf.h @@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot, * pin config. */ -#ifdef CONFIG_GENERIC_PINCONF +#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS) void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin); diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c index caecdd373061..c542a97c82f3 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ b/drivers/pinctrl/pinctrl-abx500.c @@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, } /* check if pin use AlternateFunction register */ - if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED)) + if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED)) return mode; /* * if pin GPIOSEL bit is set and pin supports alternate function, diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 75933a6aa828..efb7f10e902a 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) } #ifdef CONFIG_PM + +static u32 wakeups[MAX_GPIO_BANKS]; +static u32 backups[MAX_GPIO_BANKS]; + static int gpio_irq_set_wake(struct irq_data *d, unsigned state) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned bank = at91_gpio->pioc_idx; + unsigned mask = 1 << d->hwirq; if (unlikely(bank >= MAX_GPIO_BANKS)) return -EINVAL; + if (state) + wakeups[bank] |= mask; + else + wakeups[bank] &= ~mask; + irq_set_irq_wake(at91_gpio->pioc_virq, state); return 0; } + +void at91_pinctrl_gpio_suspend(void) +{ + int i; + + for (i = 0; i < gpio_banks; i++) { + void __iomem *pio; + + if (!gpio_chips[i]) + continue; + + pio = gpio_chips[i]->regbase; + + backups[i] = __raw_readl(pio + PIO_IMR); + __raw_writel(backups[i], pio + PIO_IDR); + __raw_writel(wakeups[i], pio + PIO_IER); + + if (!wakeups[i]) { + clk_unprepare(gpio_chips[i]->clock); + clk_disable(gpio_chips[i]->clock); + } else { + printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", + 'A'+i, wakeups[i]); + } + } +} + +void at91_pinctrl_gpio_resume(void) +{ + int i; + + for (i = 0; i < gpio_banks; i++) { + void __iomem *pio; + + if (!gpio_chips[i]) + continue; + + pio = gpio_chips[i]->regbase; + + if (!wakeups[i]) { + if (clk_prepare(gpio_chips[i]->clock) == 0) + clk_enable(gpio_chips[i]->clock); + } + + __raw_writel(wakeups[i], pio + PIO_IDR); + __raw_writel(backups[i], pio + PIO_IER); + } +} + #else #define gpio_irq_set_wake NULL -#endif +#endif /* CONFIG_PM */ static struct irq_chip gpio_irqchip = { .name = "GPIO", diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 1a00658b3ea0..bd83c8b01cd1 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, } if (!gpio_range) { + /* + * A pin should not be freed more times than allocated. 
+ */ + if (WARN_ON(!desc->mux_usecount)) + return NULL; desc->mux_usecount--; if (desc->mux_usecount) return NULL; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 45cacf79f3a7..1a779bbfb87d 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = { { KE_KEY, 0x2142, { KEY_MEDIA } }, { KE_KEY, 0x213b, { KEY_INFO } }, { KE_KEY, 0x2169, { KEY_DIRECTION } }, - { KE_KEY, 0x216a, { KEY_SETUP } }, { KE_KEY, 0x231b, { KEY_HELP } }, { KE_END, 0 } }; @@ -925,9 +924,6 @@ static int __init hp_wmi_init(void) err = hp_wmi_input_setup(); if (err) return err; - - //Enable magic for hotkeys that run on the SMBus - ec_write(0xe6,0x6e); } if (bios_capable) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9a907567f41e..edec135b1685 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1964,9 +1964,6 @@ struct tp_nvram_state { /* kthread for the hotkey poller */ static struct task_struct *tpacpi_hotkey_task; -/* Acquired while the poller kthread is running, use to sync start/stop */ -static struct mutex hotkey_thread_mutex; - /* * Acquire mutex to write poller control variables as an * atomic block. @@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data) unsigned int poll_freq; bool was_frozen; - mutex_lock(&hotkey_thread_mutex); - if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) goto exit; @@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data) } exit: - mutex_unlock(&hotkey_thread_mutex); return 0; } @@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void) if (tpacpi_hotkey_task) { kthread_stop(tpacpi_hotkey_task); tpacpi_hotkey_task = NULL; - mutex_lock(&hotkey_thread_mutex); - /* at this point, the thread did exit */ - mutex_unlock(&hotkey_thread_mutex); } } @@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) mutex_init(&hotkey_mutex); #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL - mutex_init(&hotkey_thread_mutex); mutex_init(&hotkey_thread_data_mutex); #endif diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd0..c6d77e20622c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -4,7 +4,7 @@ menu "Remoteproc drivers" config REMOTEPROC tristate depends on HAS_DMA - select FW_CONFIG + select FW_LOADER select VIRTIO config OMAP_REMOTEPROC diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc9..8edb4aed5d36 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) * TODO: support predefined notifyids (via resource table) */ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); - if (ret) { + if (ret < 0) { dev_err(dev, "idr_alloc failed: %d\n", ret); dma_free_coherent(dev->parent, size, va, dma); return ret; @@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* it is now safe to add the virtio device */ ret = rproc_add_virtio_dev(rvdev, rsc->id); if (ret) - goto free_rvdev; + goto remove_rvdev; return 0; +remove_rvdev: + list_del(&rvdev->node); free_rvdev: kfree(rvdev); return ret; diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c069339..fb95c4220052 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c @@ 
-240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev) /* Unregister as remoteproc device */ rproc_del(sproc->rproc); + dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, + sproc->fw_addr, sproc->fw_dma_addr); rproc_put(sproc->rproc); mdev->drv_data = NULL; @@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev) /* Register as a remoteproc device */ err = rproc_add(rproc); if (err) - goto free_rproc; + goto free_mem; return 0; +free_mem: + dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, + sproc->fw_addr, sproc->fw_dma_addr); free_rproc: /* Reset device data upon error */ mdev->drv_data = NULL; diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 0dde688ca09b..969abbad7fe3 100644 --- a/drivers/rtc/rtc-da9052.c +++ b/drivers/rtc/rtc-da9052.c @@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev) rtc->da9052 = dev_get_drvdata(pdev->dev.parent); platform_set_drvdata(pdev, rtc); - rtc->irq = platform_get_irq_byname(pdev, "ALM"); - ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, - da9052_rtc_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - "ALM", rtc); + rtc->irq = DA9052_IRQ_ALARM; + ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM", + da9052_rtc_irq, rtc); if (ret != 0) { rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); return ret; diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index 57233c885998..8f87fec27ce7 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/of.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/gfp.h> #include <linux/module.h> @@ -41,6 +42,7 @@ struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; int irq; + struct clk *clk; }; static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm) @@ -221,6 +223,7 @@ static int mv_rtc_probe(struct platform_device *pdev) struct rtc_plat_data *pdata; resource_size_t size; u32 rtc_time; + int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) @@ -239,11 +242,17 @@ static int mv_rtc_probe(struct platform_device *pdev) if (!pdata->ioaddr) return -ENOMEM; + pdata->clk = devm_clk_get(&pdev->dev, NULL); + /* Not all SoCs require a clock.*/ + if (!IS_ERR(pdata->clk)) + clk_prepare_enable(pdata->clk); + /* make sure the 24 hours mode is enabled */ rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); if (rtc_time & RTC_HOURS_12H_MODE) { dev_err(&pdev->dev, "24 Hours mode not supported.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } /* make sure it is actually functional */ @@ -252,7 +261,8 @@ static int mv_rtc_probe(struct platform_device *pdev) rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); if (rtc_time == 0x01000000) { dev_err(&pdev->dev, "internal RTC not ticking\n"); - return -ENODEV; + ret = -ENODEV; + goto out; } } @@ -268,8 +278,10 @@ static int mv_rtc_probe(struct platform_device *pdev) } else pdata->rtc = rtc_device_register(pdev->name, &pdev->dev, &mv_rtc_ops, THIS_MODULE); - if (IS_ERR(pdata->rtc)) - return PTR_ERR(pdata->rtc); + if (IS_ERR(pdata->rtc)) { + ret = PTR_ERR(pdata->rtc); + goto out; + } if (pdata->irq >= 0) { writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); @@ -282,6 +294,11 @@ static int mv_rtc_probe(struct platform_device *pdev) } return 0; +out: + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + + return ret; } static int __exit mv_rtc_remove(struct platform_device *pdev) @@ -292,6 +309,9 @@ static int __exit 
mv_rtc_remove(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 0); rtc_device_unregister(pdata->rtc); + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index f1b7fdc58a5f..82758cbb220b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -246,7 +246,7 @@ static struct dentry *dasd_debugfs_setup(const char *name, static int dasd_state_known_to_basic(struct dasd_device *device) { struct dasd_block *block = device->block; - int rc; + int rc = 0; /* Allocate and register gendisk structure. */ if (block) { @@ -273,7 +273,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device) DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); device->state = DASD_STATE_BASIC; - return 0; + + return rc; } /* @@ -282,6 +283,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device) static int dasd_state_basic_to_known(struct dasd_device *device) { int rc; + if (device->block) { dasd_profile_exit(&device->block->profile); if (device->block->debugfs_dentry) @@ -332,8 +334,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) if (block->base->discipline->do_analysis != NULL) rc = block->base->discipline->do_analysis(block); if (rc) { - if (rc != -EAGAIN) + if (rc != -EAGAIN) { device->state = DASD_STATE_UNFMT; + goto out; + } return rc; } dasd_setup_queue(block); @@ -341,11 +345,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) block->blocks << block->s2b_shift); device->state = DASD_STATE_READY; rc = dasd_scan_partitions(block); - if (rc) + if (rc) { device->state = DASD_STATE_BASIC; + return rc; + } } else { device->state = DASD_STATE_READY; } +out: + if (device->discipline->basic_to_ready) + rc = device->discipline->basic_to_ready(device); return rc; } @@ -368,6 +377,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) { int rc; + if (device->discipline->ready_to_basic) { + rc = device->discipline->ready_to_basic(device); + if (rc) + return rc; + } device->state = DASD_STATE_BASIC; if (device->block) { struct dasd_block *block = device->block; @@ -402,16 +416,10 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device) static int dasd_state_ready_to_online(struct dasd_device * device) { - int rc; struct gendisk *disk; struct disk_part_iter piter; struct hd_struct *part; - if (device->discipline->ready_to_online) { - rc = device->discipline->ready_to_online(device); - if (rc) - return rc; - } device->state = DASD_STATE_ONLINE; if (device->block) { dasd_schedule_block_bh(device->block); @@ -444,6 +452,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device) if (rc) return rc; } + device->state = DASD_STATE_READY; if (device->block && !(device->features & DASD_FEATURE_USERAW)) { disk = device->block->bdev->bd_disk; @@ -2223,6 +2232,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) return rc; } +static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) +{ + struct dasd_ccw_req *cqr; + + list_for_each_entry(cqr, ccw_queue, blocklist) { + if (cqr->callback_data != DASD_SLEEPON_END_TAG) + return 0; + } + + return 1; +} + +static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) +{ + struct dasd_device *device; + int rc; + struct dasd_ccw_req *cqr, *n; + +retry: + list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { + device = cqr->startdev; + if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ + continue; + 
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && + !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EPERM; + continue; + } + /*Non-temporary stop condition will trigger fail fast*/ + if (device->stopped & ~DASD_STOPPED_PENDING && + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && + !dasd_eer_enabled(device)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EAGAIN; + continue; + } + + /*Don't try to start requests if device is stopped*/ + if (interruptible) { + rc = wait_event_interruptible( + generic_waitq, !device->stopped); + if (rc == -ERESTARTSYS) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = rc; + continue; + } + } else + wait_event(generic_waitq, !(device->stopped)); + + if (!cqr->callback) + cqr->callback = dasd_wakeup_cb; + cqr->callback_data = DASD_SLEEPON_START_TAG; + dasd_add_request_tail(cqr); + } + + wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); + + rc = 0; + list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { + if (__dasd_sleep_on_erp(cqr)) + rc = 1; + } + if (rc) + goto retry; + + + return 0; +} + /* * Queue a request to the tail of the device ccw_queue and wait for * it's completion. @@ -2233,6 +2313,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr) } /* + * Start requests from a ccw_queue and wait for their completion. + */ +int dasd_sleep_on_queue(struct list_head *ccw_queue) +{ + return _dasd_sleep_on_queue(ccw_queue, 0); +} +EXPORT_SYMBOL(dasd_sleep_on_queue); + +/* * Queue a request to the tail of the device ccw_queue and wait * interruptible for it's completion. */ @@ -2663,6 +2752,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) } /* + * Requeue a request back to the block request queue + * only works for block requests + */ +static int _dasd_requeue_request(struct dasd_ccw_req *cqr) +{ + struct dasd_block *block = cqr->block; + struct request *req; + unsigned long flags; + + if (!block) + return -EINVAL; + spin_lock_irqsave(&block->queue_lock, flags); + req = (struct request *) cqr->callback_data; + blk_requeue_request(block->request_queue, req); + spin_unlock_irqrestore(&block->queue_lock, flags); + + return 0; +} + +/* * Go through all request on the dasd_block request queue, cancel them * on the respective dasd_device, and return them to the generic * block layer. 
@@ -3380,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path); int dasd_generic_pm_freeze(struct ccw_device *cdev) { + struct dasd_device *device = dasd_device_from_cdev(cdev); + struct list_head freeze_queue; struct dasd_ccw_req *cqr, *n; + struct dasd_ccw_req *refers; int rc; - struct list_head freeze_queue; - struct dasd_device *device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) return PTR_ERR(device); @@ -3396,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) /* disallow new I/O */ dasd_device_set_stop_bits(device, DASD_STOPPED_PM); - /* clear active requests */ + + /* clear active requests and requeue them to block layer if possible */ INIT_LIST_HEAD(&freeze_queue); spin_lock_irq(get_ccwdev_lock(cdev)); rc = 0; @@ -3416,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) } list_move_tail(&cqr->devlist, &freeze_queue); } - spin_unlock_irq(get_ccwdev_lock(cdev)); list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { @@ -3424,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) (cqr->status != DASD_CQR_CLEAR_PENDING)); if (cqr->status == DASD_CQR_CLEARED) cqr->status = DASD_CQR_QUEUED; + + /* requeue requests to blocklayer will only work for + block device requests */ + if (_dasd_requeue_request(cqr)) + continue; + + /* remove requests from device and block queue */ + list_del_init(&cqr->devlist); + while (cqr->refers != NULL) { + refers = cqr->refers; + /* remove the request from the block queue */ + list_del(&cqr->blocklist); + /* free the finished erp request */ + dasd_free_erp_request(cqr, cqr->memdev); + cqr = refers; + } + if (cqr->block) + list_del_init(&cqr->blocklist); + cqr->block->base->discipline->free_cp( + cqr, (struct request *) cqr->callback_data); } - /* move freeze_queue to start of the ccw_queue */ - spin_lock_irq(get_ccwdev_lock(cdev)); - list_splice_tail(&freeze_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(cdev)); + /* + * if requests remain then they are internal request + * and go back to the device queue + */ + if (!list_empty(&freeze_queue)) { + /* move freeze_queue to start of the ccw_queue */ + spin_lock_irq(get_ccwdev_lock(cdev)); + list_splice_tail(&freeze_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(cdev)); + } dasd_put_device(device); return rc; } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index c196827c228f..a71bb8aaca1d 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -410,8 +410,7 @@ dasd_add_busid(const char *bus_id, int features) struct dasd_devmap *devmap, *new, *tmp; int hash; - new = (struct dasd_devmap *) - kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); + new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); spin_lock(&dasd_devmap_lock); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6999fd919e94..6a44b27623ed 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2022,7 +2022,7 @@ static int dasd_eckd_do_analysis(struct dasd_block *block) return dasd_eckd_end_analysis(block); } -static int dasd_eckd_ready_to_online(struct dasd_device *device) +static int dasd_eckd_basic_to_ready(struct dasd_device *device) { return dasd_alias_add_device(device); }; @@ -2031,6 +2031,11 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device) { cancel_work_sync(&device->reload_device); cancel_work_sync(&device->kick_validate); + return 0; +}; + +static int 
dasd_eckd_ready_to_basic(struct dasd_device *device) +{ return dasd_alias_remove_device(device); }; @@ -2050,45 +2055,34 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) } static struct dasd_ccw_req * -dasd_eckd_format_device(struct dasd_device * device, - struct format_data_t * fdata) +dasd_eckd_build_format(struct dasd_device *base, + struct format_data_t *fdata) { - struct dasd_eckd_private *private; + struct dasd_eckd_private *base_priv; + struct dasd_eckd_private *start_priv; + struct dasd_device *startdev; struct dasd_ccw_req *fcp; struct eckd_count *ect; + struct ch_t address; struct ccw1 *ccw; void *data; int rpt; - struct ch_t address; int cplength, datasize; - int i; + int i, j; int intensity = 0; int r0_perm; + int nr_tracks; - private = (struct dasd_eckd_private *) device->private; - rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize); - set_ch_t(&address, - fdata->start_unit / private->rdc_data.trk_per_cyl, - fdata->start_unit % private->rdc_data.trk_per_cyl); + startdev = dasd_alias_get_start_dev(base); + if (!startdev) + startdev = base; - /* Sanity checks. */ - if (fdata->start_unit >= - (private->real_cyl * private->rdc_data.trk_per_cyl)) { - dev_warn(&device->cdev->dev, "Start track number %d used in " - "formatting is too big\n", fdata->start_unit); - return ERR_PTR(-EINVAL); - } - if (fdata->start_unit > fdata->stop_unit) { - dev_warn(&device->cdev->dev, "Start track %d used in " - "formatting exceeds end track\n", fdata->start_unit); - return ERR_PTR(-EINVAL); - } - if (dasd_check_blocksize(fdata->blksize) != 0) { - dev_warn(&device->cdev->dev, - "The DASD cannot be formatted with block size %d\n", - fdata->blksize); - return ERR_PTR(-EINVAL); - } + start_priv = (struct dasd_eckd_private *) startdev->private; + base_priv = (struct dasd_eckd_private *) base->private; + + rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); + + nr_tracks = fdata->stop_unit - fdata->start_unit + 1; /* * fdata->intensity is a bit string that tells us what to do: @@ -2106,149 +2100,282 @@ dasd_eckd_format_device(struct dasd_device * device, r0_perm = 1; intensity = fdata->intensity; } + switch (intensity) { case 0x00: /* Normal format */ case 0x08: /* Normal format, use cdl. */ - cplength = 2 + rpt; - datasize = sizeof(struct DE_eckd_data) + + cplength = 2 + (rpt*nr_tracks); + datasize = sizeof(struct PFX_eckd_data) + sizeof(struct LO_eckd_data) + - rpt * sizeof(struct eckd_count); + rpt * nr_tracks * sizeof(struct eckd_count); break; case 0x01: /* Write record zero and format track. */ case 0x09: /* Write record zero and format track, use cdl. */ - cplength = 3 + rpt; - datasize = sizeof(struct DE_eckd_data) + + cplength = 2 + rpt * nr_tracks; + datasize = sizeof(struct PFX_eckd_data) + sizeof(struct LO_eckd_data) + sizeof(struct eckd_count) + - rpt * sizeof(struct eckd_count); + rpt * nr_tracks * sizeof(struct eckd_count); break; case 0x04: /* Invalidate track. */ case 0x0c: /* Invalidate track, use cdl. */ cplength = 3; - datasize = sizeof(struct DE_eckd_data) + + datasize = sizeof(struct PFX_eckd_data) + sizeof(struct LO_eckd_data) + sizeof(struct eckd_count); break; default: - dev_warn(&device->cdev->dev, "An I/O control call used " - "incorrect flags 0x%x\n", fdata->intensity); + dev_warn(&startdev->cdev->dev, + "An I/O control call used incorrect flags 0x%x\n", + fdata->intensity); return ERR_PTR(-EINVAL); } /* Allocate the format ccw request. 
*/ - fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); + fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, + datasize, startdev); if (IS_ERR(fcp)) return fcp; + start_priv->count++; data = fcp->data; ccw = fcp->cpaddr; switch (intensity & ~0x08) { case 0x00: /* Normal format. */ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_CKD, device); + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, base, startdev); /* grant subsystem permission to format R0 */ if (r0_perm) - ((struct DE_eckd_data *)data)->ga_extended |= 0x04; - data += sizeof(struct DE_eckd_data); + ((struct PFX_eckd_data *)data) + ->define_extent.ga_extended |= 0x04; + data += sizeof(struct PFX_eckd_data); ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, - fdata->start_unit, 0, rpt, - DASD_ECKD_CCW_WRITE_CKD, device, + fdata->start_unit, 0, rpt*nr_tracks, + DASD_ECKD_CCW_WRITE_CKD, base, fdata->blksize); data += sizeof(struct LO_eckd_data); break; case 0x01: /* Write record zero + format track. */ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_RECORD_ZERO, - device); - data += sizeof(struct DE_eckd_data); + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, + base, startdev); + data += sizeof(struct PFX_eckd_data); ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, - fdata->start_unit, 0, rpt + 1, - DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, - device->block->bp_block); + fdata->start_unit, 0, rpt * nr_tracks + 1, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, + base->block->bp_block); data += sizeof(struct LO_eckd_data); break; case 0x04: /* Invalidate track. 
*/ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_CKD, device); - data += sizeof(struct DE_eckd_data); + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, base, startdev); + data += sizeof(struct PFX_eckd_data); ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, fdata->start_unit, 0, 1, - DASD_ECKD_CCW_WRITE_CKD, device, 8); + DASD_ECKD_CCW_WRITE_CKD, base, 8); data += sizeof(struct LO_eckd_data); break; } - if (intensity & 0x01) { /* write record zero */ - ect = (struct eckd_count *) data; - data += sizeof(struct eckd_count); - ect->cyl = address.cyl; - ect->head = address.head; - ect->record = 0; - ect->kl = 0; - ect->dl = 8; - ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; - ccw->flags = CCW_FLAG_SLI; - ccw->count = 8; - ccw->cda = (__u32)(addr_t) ect; - ccw++; - } - if ((intensity & ~0x08) & 0x04) { /* erase track */ - ect = (struct eckd_count *) data; - data += sizeof(struct eckd_count); - ect->cyl = address.cyl; - ect->head = address.head; - ect->record = 1; - ect->kl = 0; - ect->dl = 0; - ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; - ccw->flags = CCW_FLAG_SLI; - ccw->count = 8; - ccw->cda = (__u32)(addr_t) ect; - } else { /* write remaining records */ - for (i = 0; i < rpt; i++) { + + for (j = 0; j < nr_tracks; j++) { + /* calculate cylinder and head for the current track */ + set_ch_t(&address, + (fdata->start_unit + j) / + base_priv->rdc_data.trk_per_cyl, + (fdata->start_unit + j) % + base_priv->rdc_data.trk_per_cyl); + if (intensity & 0x01) { /* write record zero */ ect = (struct eckd_count *) data; data += sizeof(struct eckd_count); ect->cyl = address.cyl; ect->head = address.head; - ect->record = i + 1; + ect->record = 0; ect->kl = 0; - ect->dl = fdata->blksize; - /* Check for special tracks 0-1 when formatting CDL */ - if ((intensity & 0x08) && - fdata->start_unit == 0) { - if (i < 3) { - ect->kl = 4; - ect->dl = sizes_trk0[i] - 4; - } - } - if ((intensity & 0x08) && - fdata->start_unit == 1) { - ect->kl = 44; - ect->dl = LABEL_SIZE - 44; - } + ect->dl = 8; ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; + ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; ccw->cda = (__u32)(addr_t) ect; ccw++; } + if ((intensity & ~0x08) & 0x04) { /* erase track */ + ect = (struct eckd_count *) data; + data += sizeof(struct eckd_count); + ect->cyl = address.cyl; + ect->head = address.head; + ect->record = 1; + ect->kl = 0; + ect->dl = 0; + ccw[-1].flags |= CCW_FLAG_CC; + ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; + ccw->flags = CCW_FLAG_SLI; + ccw->count = 8; + ccw->cda = (__u32)(addr_t) ect; + } else { /* write remaining records */ + for (i = 0; i < rpt; i++) { + ect = (struct eckd_count *) data; + data += sizeof(struct eckd_count); + ect->cyl = address.cyl; + ect->head = address.head; + ect->record = i + 1; + ect->kl = 0; + ect->dl = fdata->blksize; + /* + * Check for special tracks 0-1 + * when formatting CDL + */ + if ((intensity & 0x08) && + fdata->start_unit == 0) { + if (i < 3) { + ect->kl = 4; + ect->dl = sizes_trk0[i] - 4; + } + } + if ((intensity & 0x08) && + fdata->start_unit == 1) { + ect->kl = 44; + ect->dl = LABEL_SIZE - 44; + } + ccw[-1].flags |= CCW_FLAG_CC; + if (i != 0 || j == 0) + ccw->cmd_code = + DASD_ECKD_CCW_WRITE_CKD; + else + ccw->cmd_code = + DASD_ECKD_CCW_WRITE_CKD_MT; + ccw->flags = 
CCW_FLAG_SLI; + ccw->count = 8; + ccw->cda = (__u32)(addr_t) ect; + ccw++; + } + } } - fcp->startdev = device; - fcp->memdev = device; + + fcp->startdev = startdev; + fcp->memdev = startdev; fcp->retries = 256; + fcp->expires = startdev->default_expires * HZ; fcp->buildclk = get_tod_clock(); fcp->status = DASD_CQR_FILLED; + return fcp; } +static int +dasd_eckd_format_device(struct dasd_device *base, + struct format_data_t *fdata) +{ + struct dasd_ccw_req *cqr, *n; + struct dasd_block *block; + struct dasd_eckd_private *private; + struct list_head format_queue; + struct dasd_device *device; + int old_stop, format_step; + int step, rc = 0; + + block = base->block; + private = (struct dasd_eckd_private *) base->private; + + /* Sanity checks. */ + if (fdata->start_unit >= + (private->real_cyl * private->rdc_data.trk_per_cyl)) { + dev_warn(&base->cdev->dev, + "Start track number %u used in formatting is too big\n", + fdata->start_unit); + return -EINVAL; + } + if (fdata->stop_unit >= + (private->real_cyl * private->rdc_data.trk_per_cyl)) { + dev_warn(&base->cdev->dev, + "Stop track number %u used in formatting is too big\n", + fdata->stop_unit); + return -EINVAL; + } + if (fdata->start_unit > fdata->stop_unit) { + dev_warn(&base->cdev->dev, + "Start track %u used in formatting exceeds end track\n", + fdata->start_unit); + return -EINVAL; + } + if (dasd_check_blocksize(fdata->blksize) != 0) { + dev_warn(&base->cdev->dev, + "The DASD cannot be formatted with block size %u\n", + fdata->blksize); + return -EINVAL; + } + + INIT_LIST_HEAD(&format_queue); + old_stop = fdata->stop_unit; + + while (fdata->start_unit <= 1) { + fdata->stop_unit = fdata->start_unit; + cqr = dasd_eckd_build_format(base, fdata); + list_add(&cqr->blocklist, &format_queue); + + fdata->stop_unit = old_stop; + fdata->start_unit++; + + if (fdata->start_unit > fdata->stop_unit) + goto sleep; + } + +retry: + format_step = 255 / recs_per_track(&private->rdc_data, 0, + fdata->blksize); + while (fdata->start_unit <= old_stop) { + step = fdata->stop_unit - fdata->start_unit + 1; + if (step > format_step) + fdata->stop_unit = fdata->start_unit + format_step - 1; + + cqr = dasd_eckd_build_format(base, fdata); + if (IS_ERR(cqr)) { + if (PTR_ERR(cqr) == -ENOMEM) { + /* + * not enough memory available + * go to out and start requests + * retry after first requests were finished + */ + fdata->stop_unit = old_stop; + goto sleep; + } else + return PTR_ERR(cqr); + } + list_add(&cqr->blocklist, &format_queue); + + fdata->start_unit = fdata->stop_unit + 1; + fdata->stop_unit = old_stop; + } + +sleep: + dasd_sleep_on_queue(&format_queue); + + list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { + device = cqr->startdev; + private = (struct dasd_eckd_private *) device->private; + if (cqr->status == DASD_CQR_FAILED) + rc = -EIO; + list_del_init(&cqr->blocklist); + dasd_sfree_request(cqr, device); + private->count--; + } + + /* + * in case of ENOMEM we need to retry after + * first requests are finished + */ + if (fdata->start_unit <= fdata->stop_unit) + goto retry; + + return rc; +} + static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) { cqr->status = DASD_CQR_FILLED; @@ -4305,8 +4432,9 @@ static struct dasd_discipline dasd_eckd_discipline = { .uncheck_device = dasd_eckd_uncheck_device, .do_analysis = dasd_eckd_do_analysis, .verify_path = dasd_eckd_verify_path, - .ready_to_online = dasd_eckd_ready_to_online, + .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, + .ready_to_basic = 
dasd_eckd_ready_to_basic, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 899e3f5a56e5..0785bd9bd5b6 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -300,10 +300,11 @@ struct dasd_discipline { * Last things to do when a device is set online, and first things * when it is set offline. */ - int (*ready_to_online) (struct dasd_device *); + int (*basic_to_ready) (struct dasd_device *); int (*online_to_ready) (struct dasd_device *); + int (*ready_to_basic) (struct dasd_device *); - /* + /* (struct dasd_device *); * Device operation functions. build_cp creates a ccw chain for * a block device request, start_io starts the request and * term_IO cancels it (e.g. in case of a timeout). format_device @@ -317,8 +318,8 @@ struct dasd_discipline { int (*start_IO) (struct dasd_ccw_req *); int (*term_IO) (struct dasd_ccw_req *); void (*handle_terminated_request) (struct dasd_ccw_req *); - struct dasd_ccw_req *(*format_device) (struct dasd_device *, - struct format_data_t *); + int (*format_device) (struct dasd_device *, + struct format_data_t *); int (*free_cp) (struct dasd_ccw_req *, struct request *); /* @@ -672,6 +673,7 @@ int dasd_term_IO(struct dasd_ccw_req *); void dasd_schedule_device_bh(struct dasd_device *); void dasd_schedule_block_bh(struct dasd_block *); int dasd_sleep_on(struct dasd_ccw_req *); +int dasd_sleep_on_queue(struct list_head *); int dasd_sleep_on_immediatly(struct dasd_ccw_req *); int dasd_sleep_on_interruptible(struct dasd_ccw_req *); void dasd_device_set_timer(struct dasd_device *, int); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 03c0e0444553..8be1b51e9311 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -143,12 +143,12 @@ static int dasd_ioctl_resume(struct dasd_block *block) /* * performs formatting of _device_ according to _fdata_ * Note: The discipline's format_function is assumed to deliver formatting - * commands to format a single unit of the device. In terms of the ECKD - * devices this means CCWs are generated to format a single track. + * commands to format multiple units of the device. In terms of the ECKD + * devices this means CCWs are generated to format multiple tracks. 
*/ -static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) +static int +dasd_format(struct dasd_block *block, struct format_data_t *fdata) { - struct dasd_ccw_req *cqr; struct dasd_device *base; int rc; @@ -157,8 +157,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) return -EPERM; if (base->state != DASD_STATE_BASIC) { - pr_warning("%s: The DASD cannot be formatted while it is " - "enabled\n", dev_name(&base->cdev->dev)); + pr_warn("%s: The DASD cannot be formatted while it is enabled\n", + dev_name(&base->cdev->dev)); return -EBUSY; } @@ -178,21 +178,10 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) bdput(bdev); } - while (fdata->start_unit <= fdata->stop_unit) { - cqr = base->discipline->format_device(base, fdata); - if (IS_ERR(cqr)) - return PTR_ERR(cqr); - rc = dasd_sleep_on_interruptible(cqr); - dasd_sfree_request(cqr, cqr->memdev); - if (rc) { - if (rc != -ERESTARTSYS) - pr_err("%s: Formatting unit %d failed with " - "rc=%d\n", dev_name(&base->cdev->dev), - fdata->start_unit, rc); - return rc; - } - fdata->start_unit++; - } + rc = base->discipline->format_device(base, fdata); + if (rc) + return rc; + return 0; } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 9978ad4433cb..b303cab76a7f 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = { .release = scm_release, }; +static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) +{ + return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; +} + static void scm_request_prepare(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; @@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq) scm_release_cluster(scmrq); blk_requeue_request(bdev->rq, scmrq->request); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); scm_ensure_queue_restart(bdev); } void scm_request_finish(struct scm_request *scmrq) { + struct scm_blk_dev *bdev = scmrq->bdev; + scm_release_cluster(scmrq); blk_end_request_all(scmrq->request, scmrq->error); + atomic_dec(&bdev->queued_reqs); scm_request_done(scmrq); } @@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq) if (req->cmd_type != REQ_TYPE_FS) continue; + if (!scm_permit_request(bdev, req)) { + scm_ensure_queue_restart(bdev); + return; + } scmrq = scm_request_fetch(); if (!scmrq) { SCM_LOG(5, "no request"); @@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq) return; } if (scm_need_cluster_request(scmrq)) { + atomic_inc(&bdev->queued_reqs); blk_start_request(req); scm_initiate_cluster_request(scmrq); return; } scm_request_prepare(scmrq); + atomic_inc(&bdev->queued_reqs); blk_start_request(req); ret = scm_start_aob(scmrq->aob); @@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq) scm_request_requeue(scmrq); return; } - atomic_inc(&bdev->queued_reqs); } } @@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error) tasklet_hi_schedule(&bdev->tasklet); } +static void scm_blk_handle_error(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + unsigned long flags; + + if (scmrq->error != -EIO) + goto restart; + + /* For -EIO the response block is valid. 
*/ + switch (scmrq->aob->response.eqc) { + case EQC_WR_PROHIBIT: + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state != SCM_WR_PROHIBIT) + pr_info("%lx: Write access to the SCM increment is suspended\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_WR_PROHIBIT; + spin_unlock_irqrestore(&bdev->lock, flags); + goto requeue; + default: + break; + } + +restart: + if (!scm_start_aob(scmrq->aob)) + return; + +requeue: + spin_lock_irqsave(&bdev->rq_lock, flags); + scm_request_requeue(scmrq); + spin_unlock_irqrestore(&bdev->rq_lock, flags); +} + static void scm_blk_tasklet(struct scm_blk_dev *bdev) { struct scm_request *scmrq; @@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) spin_unlock_irqrestore(&bdev->lock, flags); if (scmrq->error && scmrq->retries-- > 0) { - if (scm_start_aob(scmrq->aob)) { - spin_lock_irqsave(&bdev->rq_lock, flags); - scm_request_requeue(scmrq); - spin_unlock_irqrestore(&bdev->rq_lock, flags); - } + scm_blk_handle_error(scmrq); + /* Request restarted or requeued, handle next. */ spin_lock_irqsave(&bdev->lock, flags); continue; @@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) } scm_request_finish(scmrq); - atomic_dec(&bdev->queued_reqs); spin_lock_irqsave(&bdev->lock, flags); } spin_unlock_irqrestore(&bdev->lock, flags); @@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) } bdev->scmdev = scmdev; + bdev->state = SCM_OPER; spin_lock_init(&bdev->rq_lock); spin_lock_init(&bdev->lock); INIT_LIST_HEAD(&bdev->finished_requests); @@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) put_disk(bdev->gendisk); } +void scm_blk_set_available(struct scm_blk_dev *bdev) +{ + unsigned long flags; + + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state == SCM_WR_PROHIBIT) + pr_info("%lx: Write access to the SCM increment is restored\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_OPER; + spin_unlock_irqrestore(&bdev->lock, flags); +} + static int __init scm_blk_init(void) { int ret = -EINVAL; @@ -408,12 +463,15 @@ static int __init scm_blk_init(void) goto out; scm_major = ret; - if (scm_alloc_rqs(nr_requests)) - goto out_unreg; + ret = scm_alloc_rqs(nr_requests); + if (ret) + goto out_free; scm_debug = debug_register("scm_log", 16, 1, 16); - if (!scm_debug) + if (!scm_debug) { + ret = -ENOMEM; goto out_free; + } debug_register_view(scm_debug, &debug_hex_ascii_view); debug_set_level(scm_debug, 2); @@ -428,7 +486,6 @@ out_dbf: debug_unregister(scm_debug); out_free: scm_free_rqs(); -out_unreg: unregister_blkdev(scm_major, "scm"); out: return ret; diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 3c1ccf494647..8b387b32fd62 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -21,6 +21,7 @@ struct scm_blk_dev { spinlock_t rq_lock; /* guard the request queue */ spinlock_t lock; /* guard the rest of the blockdev */ atomic_t queued_reqs; + enum {SCM_OPER, SCM_WR_PROHIBIT} state; struct list_head finished_requests; #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE struct list_head cluster_list; @@ -48,6 +49,7 @@ struct scm_request { int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); void scm_blk_dev_cleanup(struct scm_blk_dev *); +void scm_blk_set_available(struct scm_blk_dev *); void scm_blk_irq(struct scm_device *, void *, int); void scm_request_finish(struct scm_request *); diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 
f4bb61b0cea1..c0d102e3a48b 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -223,6 +223,8 @@ void scm_cluster_request_irq(struct scm_request *scmrq) bool scm_cluster_size_valid(void) { - return write_cluster_size == 0 || write_cluster_size == 32 || - write_cluster_size == 64 || write_cluster_size == 128; + if (write_cluster_size == 1 || write_cluster_size > 128) + return false; + + return !(write_cluster_size & (write_cluster_size - 1)); } diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 9fa0a908607b..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c @@ -13,12 +13,23 @@ #include <asm/eadm.h> #include "scm_blk.h" -static void notify(struct scm_device *scmdev) +static void scm_notify(struct scm_device *scmdev, enum scm_event event) { - pr_info("%lu: The capabilities of the SCM increment changed\n", - (unsigned long) scmdev->address); - SCM_LOG(2, "State changed"); - SCM_LOG_STATE(2, scmdev); + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + + switch (event) { + case SCM_CHANGE: + pr_info("%lx: The capabilities of the SCM increment changed\n", + (unsigned long) scmdev->address); + SCM_LOG(2, "State changed"); + SCM_LOG_STATE(2, scmdev); + break; + case SCM_AVAIL: + SCM_LOG(2, "Increment available"); + SCM_LOG_STATE(2, scmdev); + scm_blk_set_available(bdev); + break; + } } static int scm_probe(struct scm_device *scmdev) @@ -64,7 +75,7 @@ static struct scm_driver scm_drv = { .name = "scm_block", .owner = THIS_MODULE, }, - .notify = notify, + .notify = scm_notify, .probe = scm_probe, .remove = scm_remove, .handler = scm_blk_irq, diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 7b00fa634d40..eb5d22795c47 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -502,7 +502,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; #ifdef CONFIG_TN3215_CONSOLE - wait_cons_dev(); + ccw_device_wait_idle(raw->cdev); #endif /* Enough room freed up ? */ if (RAW3215_BUFFER_SIZE - raw->count >= length) @@ -858,7 +858,7 @@ static void con3215_flush(void) raw = raw3215[0]; /* console 3215 is the first one */ if (raw->port.flags & ASYNC_SUSPENDED) /* The console is still frozen for suspend. */ - if (ccw_device_force_console()) + if (ccw_device_force_console(raw->cdev)) /* Forcing didn't work, no panic message .. 
*/ return; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index f4ff515db251..0da3ae3cd63b 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -174,8 +174,7 @@ static void mon_free_mem(struct mon_private *monpriv) int i; for (i = 0; i < MON_MSGLIM; i++) - if (monpriv->msg_array[i]) - kfree(monpriv->msg_array[i]); + kfree(monpriv->msg_array[i]); kfree(monpriv); } diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 4c9030a5b9f2..24a08e8f19e1 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -796,7 +796,7 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) do { __raw3270_reset_device(rp); while (!raw3270_state_final(rp)) { - wait_cons_dev(); + ccw_device_wait_idle(rp->cdev); barrier(); } } while (rp->state != RAW3270_STATE_READY); @@ -810,7 +810,7 @@ raw3270_wait_cons_dev(struct raw3270 *rp) unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - wait_cons_dev(); + ccw_device_wait_idle(rp->cdev); spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); } @@ -1274,7 +1274,7 @@ void raw3270_pm_unfreeze(struct raw3270_view *view) rp = view->dev; if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) - ccw_device_force_console(); + ccw_device_force_console(rp->cdev); #endif } diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 30a2255389e5..178836ec252b 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -561,6 +561,8 @@ static void __init sclp_add_standby_memory(void) add_memory_merged(0); } +#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS) + static void __init insert_increment(u16 rn, int standby, int assigned) { struct memory_increment *incr, *new_incr; @@ -573,7 +575,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) new_incr->rn = rn; new_incr->standby = standby; if (!standby) - new_incr->usecount = 1; + new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { @@ -627,6 +629,8 @@ static int __init sclp_detect_standby_memory(void) struct read_storage_sccb *sccb; int i, id, assigned, rc; + if (OLDMEM_BASE) /* No standby memory in kdump mode */ + return 0; if (!early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) int i, rc; /* Check if the tty3270 is already there. */ - view = raw3270_find_view(&tty3270_fn, tty->index); + view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); if (!IS_ERR(view)) { tp = container_of(view, struct tty3270, view); tty->driver_data = tp; @@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) tp->inattr = TF_INPUT; return tty_port_install(&tp->port, driver, tty); } - if (tty3270_max_index < tty->index) - tty3270_max_index = tty->index; + if (tty3270_max_index < tty->index + 1) + tty3270_max_index = tty->index + 1; /* Allocate tty3270 structure on first open. 
*/ tp = tty3270_alloc_view(); if (IS_ERR(tp)) return PTR_ERR(tp); - rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); + rc = raw3270_add_view(&tp->view, &tty3270_fn, + tty->index + RAW3270_FIRSTMINOR); if (rc) { tty3270_free_view(tp); return rc; @@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { void tty3270_create_cb(int minor) { - tty_register_device(tty3270_driver, minor, NULL); + tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); } void tty3270_destroy_cb(int minor) { - tty_unregister_device(tty3270_driver, minor); + tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); } struct raw3270_notifier tty3270_notifier = @@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) driver->driver_name = "tty3270"; driver->name = "3270/tty"; driver->major = IBM_TTY3270_MAJOR; - driver->minor_start = 0; + driver->minor_start = RAW3270_FIRSTMINOR; + driver->name_base = RAW3270_FIRSTMINOR; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1d61a01576d2..22820610022c 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -127,7 +127,7 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) } if (mode == TO_USER) { if (copy_to_user((__force __user void*) dest + offs, buf, - PAGE_SIZE)) + count - offs)) return -EFAULT; } else memcpy(dest + offs, buf, count - offs); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 50ad5fdd815d..21fabc6d5a9c 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -377,6 +377,26 @@ static void chp_release(struct device *dev) } /** + * chp_update_desc - update channel-path description + * @chp - channel-path + * + * Update the channel-path description of the specified channel-path. + * Return zero on success, non-zero otherwise. + */ +int chp_update_desc(struct channel_path *chp) +{ + int rc; + + rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); + if (rc) + return rc; + + rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + + return rc; +} + +/** * chp_new - register a new channel-path * @chpid - channel-path ID * @@ -403,7 +423,7 @@ int chp_new(struct chp_id chpid) mutex_init(&chp->lock); /* Obtain channel path description and fill it in. */ - ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); + ret = chp_update_desc(chp); if (ret) goto out_free; if ((chp->desc.flags & 0x80) == 0) { diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index e1399dbee834..9284b785a06f 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -44,6 +44,7 @@ struct channel_path { struct mutex lock; /* Serialize access to below members. 
*/ int state; struct channel_path_desc desc; + struct channel_path_desc_fmt1 desc_fmt1; /* Channel-measurement related stuff: */ int cmg; int shared; @@ -62,6 +63,7 @@ int chp_is_registered(struct chp_id chpid); void *chp_get_chp_desc(struct chp_id chpid); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); +int chp_update_desc(struct channel_path *chp); int chp_new(struct chp_id chpid); void chp_cfg_schedule(struct chp_id chpid, int configure); void chp_cfg_cancel_deconfigure(struct chp_id chpid); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 31ceef1beb8b..8ea7d9b2c671 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -376,7 +376,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) continue; } mutex_lock(&chp->lock); - chsc_determine_base_channel_path_desc(chpid, &chp->desc); + chp_update_desc(chp); mutex_unlock(&chp->lock); } } @@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) " failed (rc=%d).\n", ret); } +static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) +{ + int ret; + + CIO_CRW_EVENT(4, "chsc: scm available information\n"); + if (sei_area->rs != 7) + return; + + ret = scm_process_availability_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: process availability information" + " failed (rc=%d).\n", ret); +} + static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { switch (sei_area->cc) { @@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) case 12: /* scm change notification */ chsc_process_sei_scm_change(sei_area); break; + case 14: /* scm available notification */ + chsc_process_sei_scm_avail(sei_area); + break; default: /* other stuff */ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); @@ -614,8 +631,8 @@ int chsc_chp_vary(struct chp_id chpid, int on) * Redo PathVerification on the devices the chpid connects to */ if (on) { - /* Try to update the channel path descritor. */ - chsc_determine_base_channel_path_desc(chpid, &chp->desc); + /* Try to update the channel path description. 
*/ + chp_update_desc(chp); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, __s390_vary_chpid_on, &chpid); } else @@ -808,9 +825,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, { struct chsc_response_struct *chsc_resp; struct chsc_scpd *scpd_area; + unsigned long flags; int ret; - spin_lock_irq(&chsc_page_lock); + spin_lock_irqsave(&chsc_page_lock, flags); scpd_area = chsc_page; ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); if (ret) @@ -818,7 +836,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, chsc_resp = (void *)&scpd_area->response; memcpy(desc, &chsc_resp->data, sizeof(*desc)); out: - spin_unlock_irq(&chsc_page_lock); + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 227e05f674b3..349d5fc47196 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); #ifdef CONFIG_SCM_BUS int scm_update_information(void); +int scm_process_availability_information(void); #else /* CONFIG_SCM_BUS */ static inline int scm_update_information(void) { return 0; } +static inline int scm_process_availability_information(void) { return 0; } #endif /* CONFIG_SCM_BUS */ diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 986ef6a92a41..935d80b4e9ce 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -471,15 +471,6 @@ int cio_disable_subchannel(struct subchannel *sch) } EXPORT_SYMBOL_GPL(cio_disable_subchannel); -int cio_create_sch_lock(struct subchannel *sch) -{ - sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!sch->lock) - return -ENOMEM; - spin_lock_init(sch->lock); - return 0; -} - static int cio_check_devno_blacklisted(struct subchannel *sch) { if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { @@ -536,32 +527,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) sprintf(dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT(4, dbf_txt); - /* Nuke all fields. */ - memset(sch, 0, sizeof(struct subchannel)); - - sch->schid = schid; - if (cio_is_console(schid)) { - sch->lock = cio_get_console_lock(); - } else { - err = cio_create_sch_lock(sch); - if (err) - goto out; - } - mutex_init(&sch->reg_mutex); - /* * The first subchannel that is not-operational (ccode==3) - * indicates that there aren't any more devices available. + * indicates that there aren't any more devices available. * If stsch gets an exception, it means the current subchannel set - * is not valid. + * is not valid. */ - ccode = stsch_err (schid, &sch->schib); + ccode = stsch_err(schid, &sch->schib); if (ccode) { err = (ccode == 3) ? -ENXIO : ccode; goto out; } - /* Copy subchannel type from path management control word. 
*/ sch->st = sch->schib.pmcw.st; + sch->schid = schid; switch (sch->st) { case SUBCHANNEL_TYPE_IO: @@ -578,11 +556,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", sch->schid.ssid, sch->schid.sch_no, sch->st); - return 0; out: - if (!cio_is_console(schid)) - kfree(sch->lock); - sch->lock = NULL; return err; } @@ -650,15 +624,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs) } #ifdef CONFIG_CCW_CONSOLE -static struct subchannel console_subchannel; -static struct io_subchannel_private console_priv; -static int console_subchannel_in_use; +static struct subchannel *console_sch; /* * Use cio_tsch to update the subchannel status and call the interrupt handler - * if status had been pending. Called with the console_subchannel lock. + * if status had been pending. Called with the subchannel's lock held. */ -static void cio_tsch(struct subchannel *sch) +void cio_tsch(struct subchannel *sch) { struct irb *irb; int irq_context; @@ -675,6 +647,7 @@ static void cio_tsch(struct subchannel *sch) local_bh_disable(); irq_enter(); } + kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL); if (sch->driver && sch->driver->irq) sch->driver->irq(sch); else @@ -685,135 +658,90 @@ static void cio_tsch(struct subchannel *sch) } } -void *cio_get_console_priv(void) -{ - return &console_priv; -} - -/* - * busy wait for the next interrupt on the console - */ -void wait_cons_dev(void) +static int cio_test_for_console(struct subchannel_id schid, void *data) { - if (!console_subchannel_in_use) - return; - - while (1) { - cio_tsch(&console_subchannel); - if (console_subchannel.schib.scsw.cmd.actl == 0) - break; - udelay_simple(100); - } -} + struct schib schib; -static int -cio_test_for_console(struct subchannel_id schid, void *data) -{ - if (stsch_err(schid, &console_subchannel.schib) != 0) + if (stsch_err(schid, &schib) != 0) return -ENXIO; - if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && - console_subchannel.schib.pmcw.dnv && - (console_subchannel.schib.pmcw.dev == console_devno)) { + if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && + (schib.pmcw.dev == console_devno)) { console_irq = schid.sch_no; return 1; /* found */ } return 0; } - -static int -cio_get_console_sch_no(void) +static int cio_get_console_sch_no(void) { struct subchannel_id schid; - + struct schib schib; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ schid.sch_no = console_irq; - if (stsch_err(schid, &console_subchannel.schib) != 0 || - (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || - !console_subchannel.schib.pmcw.dnv) + if (stsch_err(schid, &schib) != 0 || + (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) return -1; - console_devno = console_subchannel.schib.pmcw.dev; + console_devno = schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. */ for_each_subchannel(cio_test_for_console, NULL); - if (console_irq == -1) - return -1; - } else { - /* unlike in 2.4, we cannot autoprobe here, since - * the channel subsystem is not fully initialized. 
- * With some luck, the HWC console can take over */ - return -1; } return console_irq; } -struct subchannel * -cio_probe_console(void) +struct subchannel *cio_probe_console(void) { - int sch_no, ret; struct subchannel_id schid; + struct subchannel *sch; + int sch_no, ret; - if (xchg(&console_subchannel_in_use, 1) != 0) - return ERR_PTR(-EBUSY); sch_no = cio_get_console_sch_no(); if (sch_no == -1) { - console_subchannel_in_use = 0; pr_warning("No CCW console was found\n"); return ERR_PTR(-ENODEV); } - memset(&console_subchannel, 0, sizeof(struct subchannel)); init_subchannel_id(&schid); schid.sch_no = sch_no; - ret = cio_validate_subchannel(&console_subchannel, schid); - if (ret) { - console_subchannel_in_use = 0; - return ERR_PTR(-ENODEV); - } + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return sch; - /* - * enable console I/O-interrupt subclass - */ isc_register(CONSOLE_ISC); - console_subchannel.config.isc = CONSOLE_ISC; - console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; - ret = cio_commit_config(&console_subchannel); + sch->config.isc = CONSOLE_ISC; + sch->config.intparm = (u32)(addr_t)sch; + ret = cio_commit_config(sch); if (ret) { isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; + put_device(&sch->dev); return ERR_PTR(ret); } - return &console_subchannel; -} - -void -cio_release_console(void) -{ - console_subchannel.config.intparm = 0; - cio_commit_config(&console_subchannel); - isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; + console_sch = sch; + return sch; } -/* Bah... hack to catch console special sausages. */ -int -cio_is_console(struct subchannel_id schid) +int cio_is_console(struct subchannel_id schid) { - if (!console_subchannel_in_use) + if (!console_sch) return 0; - return schid_equal(&schid, &console_subchannel.schid); + return schid_equal(&schid, &console_sch->schid); } -struct subchannel * -cio_get_console_subchannel(void) +void cio_register_early_subchannels(void) { - if (!console_subchannel_in_use) - return NULL; - return &console_subchannel; + int ret; + + if (!console_sch) + return; + + ret = css_register_subchannel(console_sch); + if (ret) + put_device(&console_sch->dev); } +#endif /* CONFIG_CCW_CONSOLE */ -#endif static int __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 4a1ff5c2eb88..d62f5e7f3cf1 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -121,23 +121,18 @@ extern int cio_commit_config(struct subchannel *sch); int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); int cio_tm_intrg(struct subchannel *sch); -int cio_create_sch_lock(struct subchannel *); void do_adapter_IO(u8 isc); void do_IRQ(struct pt_regs *); /* Use with care. 
*/ #ifdef CONFIG_CCW_CONSOLE extern struct subchannel *cio_probe_console(void); -extern void cio_release_console(void); extern int cio_is_console(struct subchannel_id); -extern struct subchannel *cio_get_console_subchannel(void); -extern spinlock_t * cio_get_console_lock(void); -extern void *cio_get_console_priv(void); +extern void cio_register_early_subchannels(void); +extern void cio_tsch(struct subchannel *sch); #else #define cio_is_console(schid) 0 -#define cio_get_console_subchannel() NULL -#define cio_get_console_lock() NULL -#define cio_get_console_priv() NULL +static inline void cio_register_early_subchannels(void) {} #endif #endif diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a239237d43f3..1ebe5d3ddebb 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -137,37 +137,53 @@ out: static void css_sch_todo(struct work_struct *work); -static struct subchannel * -css_alloc_subchannel(struct subchannel_id schid) +static int css_sch_create_locks(struct subchannel *sch) +{ + sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); + if (!sch->lock) + return -ENOMEM; + + spin_lock_init(sch->lock); + mutex_init(&sch->reg_mutex); + + return 0; +} + +static void css_subchannel_release(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + + sch->config.intparm = 0; + cio_commit_config(sch); + kfree(sch->lock); + kfree(sch); +} + +struct subchannel *css_alloc_subchannel(struct subchannel_id schid) { struct subchannel *sch; int ret; - sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); - if (sch == NULL) + sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); + if (!sch) return ERR_PTR(-ENOMEM); - ret = cio_validate_subchannel (sch, schid); - if (ret < 0) { - kfree(sch); - return ERR_PTR(ret); - } + + ret = cio_validate_subchannel(sch, schid); + if (ret < 0) + goto err; + + ret = css_sch_create_locks(sch); + if (ret) + goto err; + INIT_WORK(&sch->todo_work, css_sch_todo); + sch->dev.release = &css_subchannel_release; + device_initialize(&sch->dev); return sch; -} - -static void -css_subchannel_release(struct device *dev) -{ - struct subchannel *sch; - sch = to_subchannel(dev); - if (!cio_is_console(sch->schid)) { - /* Reset intparm to zeroes. */ - sch->config.intparm = 0; - cio_commit_config(sch); - kfree(sch->lock); - kfree(sch); - } +err: + kfree(sch); + return ERR_PTR(ret); } static int css_sch_device_register(struct subchannel *sch) @@ -177,7 +193,7 @@ static int css_sch_device_register(struct subchannel *sch) mutex_lock(&sch->reg_mutex); dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, sch->schid.sch_no); - ret = device_register(&sch->dev); + ret = device_add(&sch->dev); mutex_unlock(&sch->reg_mutex); return ret; } @@ -228,16 +244,11 @@ void css_update_ssd_info(struct subchannel *sch) { int ret; - if (cio_is_console(sch->schid)) { - /* Console is initialized too early for functions requiring - * memory allocation. 
*/ + ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); + if (ret) ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); - } else { - ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); - if (ret) - ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); - ssd_register_chpids(&sch->ssd_info); - } + + ssd_register_chpids(&sch->ssd_info); } static ssize_t type_show(struct device *dev, struct device_attribute *attr, @@ -275,14 +286,13 @@ static const struct attribute_group *default_subch_attr_groups[] = { NULL, }; -static int css_register_subchannel(struct subchannel *sch) +int css_register_subchannel(struct subchannel *sch) { int ret; /* Initialize the subchannel structure */ sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; - sch->dev.release = &css_subchannel_release; sch->dev.groups = default_subch_attr_groups; /* * We don't want to generate uevents for I/O subchannels that don't @@ -314,23 +324,19 @@ static int css_register_subchannel(struct subchannel *sch) return ret; } -int css_probe_device(struct subchannel_id schid) +static int css_probe_device(struct subchannel_id schid) { - int ret; struct subchannel *sch; + int ret; + + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return PTR_ERR(sch); - if (cio_is_console(schid)) - sch = cio_get_console_subchannel(); - else { - sch = css_alloc_subchannel(schid); - if (IS_ERR(sch)) - return PTR_ERR(sch); - } ret = css_register_subchannel(sch); - if (ret) { - if (!cio_is_console(schid)) - put_device(&sch->dev); - } + if (ret) + put_device(&sch->dev); + return ret; } @@ -770,7 +776,7 @@ static int __init setup_css(int nr) css->pseudo_subchannel->dev.release = css_subchannel_release; dev_set_name(&css->pseudo_subchannel->dev, "defunct"); mutex_init(&css->pseudo_subchannel->reg_mutex); - ret = cio_create_sch_lock(css->pseudo_subchannel); + ret = css_sch_create_locks(css->pseudo_subchannel); if (ret) { kfree(css->pseudo_subchannel); return ret; @@ -870,8 +876,7 @@ static struct notifier_block css_power_notifier = { /* * Now that the driver core is running, we can setup our channel subsystem. - * The struct subchannel's are created during probing (except for the - * static console subchannel). + * The struct subchannel's are created during probing. */ static int __init css_bus_init(void) { @@ -1050,6 +1055,8 @@ int css_complete_work(void) */ static int __init channel_subsystem_init_sync(void) { + /* Register subchannels which are already in use. */ + cio_register_early_subchannels(); /* Start initial subchannel evaluation. 
*/ css_schedule_eval_all(); css_complete_work(); @@ -1065,9 +1072,8 @@ void channel_subsystem_reinit(void) chsc_enable_facility(CHSC_SDA_OC_MSS); chp_id_for_each(&chpid) { chp = chpid_to_chp(chpid); - if (!chp) - continue; - chsc_determine_base_channel_path_desc(chpid, &chp->desc); + if (chp) + chp_update_desc(chp); } } diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 4af3dfe70ef5..b1de60335238 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *); extern void css_driver_unregister(struct css_driver *); extern void css_sch_device_unregister(struct subchannel *); -extern int css_probe_device(struct subchannel_id); +extern int css_register_subchannel(struct subchannel *); +extern struct subchannel *css_alloc_subchannel(struct subchannel_id); extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int max_ssid; @@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data); extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); -extern void css_reiterate_subchannels(void); void css_update_ssd_info(struct subchannel *sch); struct channel_subsystem { diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index c6767f5a58b2..1ab5f6c36d9b 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -19,6 +19,7 @@ #include <linux/list.h> #include <linux/device.h> #include <linux/workqueue.h> +#include <linux/delay.h> #include <linux/timer.h> #include <linux/kernel_stat.h> @@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock); static int recovery_phase; static const unsigned long recovery_delay[] = { 3, 30, 300 }; +static atomic_t ccw_device_init_count = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); +static struct bus_type ccw_bus_type; + /******************* bus type handling ***********************/ /* The Linux driver model distinguishes between a bus type and @@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) return ret; } -static struct bus_type ccw_bus_type; - static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); static int io_subchannel_remove(struct subchannel *); @@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, int); static void recovery_func(unsigned long data); -wait_queue_head_t ccw_device_init_wq; -atomic_t ccw_device_init_count; static struct css_device_id io_subchannel_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, @@ -191,10 +192,7 @@ int __init io_subchannel_init(void) { int ret; - init_waitqueue_head(&ccw_device_init_wq); - atomic_set(&ccw_device_init_count, 0); setup_timer(&recovery_timer, recovery_func, 0); - ret = bus_register(&ccw_bus_type); if (ret) return ret; @@ -1086,19 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch) dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev = sch_get_cdev(sch); - cdev->dev.groups = ccwdev_attr_groups; - device_initialize(&cdev->dev); - cdev->private->flags.initialized = 1; - ccw_device_register(cdev); - /* - * Check if the device is already online. If it is - * the reference count needs to be corrected since we - * didn't obtain a reference in ccw_device_set_online. 
- */ - if (cdev->private->state != DEV_STATE_NOT_OPER && - cdev->private->state != DEV_STATE_OFFLINE && - cdev->private->state != DEV_STATE_BOXED) - get_device(&cdev->dev); + rc = ccw_device_register(cdev); + if (rc) { + /* Release online reference. */ + put_device(&cdev->dev); + goto out_schedule; + } + if (atomic_dec_and_test(&ccw_device_init_count)) + wake_up(&ccw_device_init_wq); return 0; } io_subchannel_init_fields(sch); @@ -1580,88 +1573,102 @@ out: } #ifdef CONFIG_CCW_CONSOLE -static struct ccw_device console_cdev; -static struct ccw_device_private console_private; -static int console_cdev_in_use; - -static DEFINE_SPINLOCK(ccw_console_lock); - -spinlock_t * cio_get_console_lock(void) -{ - return &ccw_console_lock; -} - static int ccw_device_console_enable(struct ccw_device *cdev, struct subchannel *sch) { - struct io_subchannel_private *io_priv = cio_get_console_priv(); int rc; - /* Attach subchannel private data. */ - memset(io_priv, 0, sizeof(*io_priv)); - set_io_private(sch, io_priv); io_subchannel_init_fields(sch); rc = cio_commit_config(sch); if (rc) return rc; sch->driver = &io_subchannel_driver; - /* Initialize the ccw_device structure. */ - cdev->dev.parent= &sch->dev; sch_set_cdev(sch, cdev); io_subchannel_recog(cdev, sch); /* Now wait for the async. recognition to come to an end. */ spin_lock_irq(cdev->ccwlock); while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - rc = -EIO; - if (cdev->private->state != DEV_STATE_OFFLINE) + ccw_device_wait_idle(cdev); + + /* Hold on to an extra reference while device is online. */ + get_device(&cdev->dev); + rc = ccw_device_online(cdev); + if (rc) goto out_unlock; - ccw_device_online(cdev); + while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - if (cdev->private->state != DEV_STATE_ONLINE) - goto out_unlock; - rc = 0; + ccw_device_wait_idle(cdev); + + if (cdev->private->state == DEV_STATE_ONLINE) + cdev->online = 1; + else + rc = -EIO; out_unlock: spin_unlock_irq(cdev->ccwlock); + if (rc) /* Give up online reference since onlining failed. 
*/ + put_device(&cdev->dev); return rc; } -struct ccw_device * -ccw_device_probe_console(void) +struct ccw_device *ccw_device_probe_console(void) { + struct io_subchannel_private *io_priv; + struct ccw_device *cdev; struct subchannel *sch; int ret; - if (xchg(&console_cdev_in_use, 1) != 0) - return ERR_PTR(-EBUSY); sch = cio_probe_console(); - if (IS_ERR(sch)) { - console_cdev_in_use = 0; - return (void *) sch; + if (IS_ERR(sch)) + return ERR_CAST(sch); + + io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); + if (!io_priv) { + put_device(&sch->dev); + return ERR_PTR(-ENOMEM); } - memset(&console_cdev, 0, sizeof(struct ccw_device)); - memset(&console_private, 0, sizeof(struct ccw_device_private)); - console_cdev.private = &console_private; - console_private.cdev = &console_cdev; - console_private.int_class = IRQIO_CIO; - ret = ccw_device_console_enable(&console_cdev, sch); + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) { + put_device(&sch->dev); + kfree(io_priv); + return cdev; + } + set_io_private(sch, io_priv); + ret = ccw_device_console_enable(cdev, sch); if (ret) { - cio_release_console(); - console_cdev_in_use = 0; + set_io_private(sch, NULL); + put_device(&sch->dev); + put_device(&cdev->dev); + kfree(io_priv); return ERR_PTR(ret); } - console_cdev.online = 1; - return &console_cdev; + return cdev; +} + +/** + * ccw_device_wait_idle() - busy wait for device to become idle + * @cdev: ccw device + * + * Poll until activity control is zero, that is, no function or data + * transfer is pending/active. + * Called with device lock being held. + */ +void ccw_device_wait_idle(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + while (1) { + cio_tsch(sch); + if (sch->schib.scsw.cmd.actl == 0) + break; + udelay_simple(100); + } } static int ccw_device_pm_restore(struct device *dev); -int ccw_device_force_console(void) +int ccw_device_force_console(struct ccw_device *cdev) { - if (!console_cdev_in_use) - return -ENODEV; - return ccw_device_pm_restore(&console_cdev.dev); + return ccw_device_pm_restore(&cdev->dev); } EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 7d4ecb65db00..8d1d29873172 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev) cdev->private->state == DEV_STATE_BOXED); } -extern wait_queue_head_t ccw_device_init_wq; -extern atomic_t ccw_device_init_count; int __init io_subchannel_init(void); void io_subchannel_recog_done(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index c77b6e06bf64..4845d64f2842 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -704,9 +704,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { struct subchannel *sch = to_subchannel(cdev->dev.parent); - struct channel_path_desc_fmt1 desc; + struct channel_path *chp; struct chp_id chpid; - int mdc = 0, ret, i; + int mdc = 0, i; /* Adjust requested path mask to excluded varied off paths. 
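A side note on the ccw_device_wait_idle() helper introduced above: it simply re-reads the subchannel via cio_tsch() and spins, with a 100-microsecond delay, until the SCSW activity-control field is zero, that is, until no function or data transfer is pending or active. A minimal user-space sketch of that poll-until-idle idiom; the fake_activity counter stands in for the real status read and is purely illustrative:

#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for "read the subchannel status": pretend the
 * device stays busy for a few polls and then goes idle. */
static int fake_activity = 3;

static int device_busy(void)
{
        return fake_activity-- > 0;
}

static void wait_until_idle(void)
{
        /* Mirrors the loop in ccw_device_wait_idle(): poll, short delay,
         * repeat until nothing is pending or active any more. */
        while (device_busy())
                usleep(100);
}

int main(void)
{
        wait_until_idle();
        puts("device idle");
        return 0;
}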
*/ if (mask) @@ -719,14 +719,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) if (!(mask & (0x80 >> i))) continue; chpid.id = sch->schib.pmcw.chpid[i]; - ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc); - if (ret) - return ret; - if (!desc.f) + chp = chpid_to_chp(chpid); + if (!chp) + continue; + + mutex_lock(&chp->lock); + if (!chp->desc_fmt1.f) { + mutex_unlock(&chp->lock); return 0; - if (!desc.r) + } + if (!chp->desc_fmt1.r) mdc = 1; - mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc; + mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) : + chp->desc_fmt1.mdc; + mutex_unlock(&chp->lock); } return mdc; diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 65d13e38803f..5a999084a229 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -17,7 +17,7 @@ struct idset { static inline unsigned long bitmap_size(int num_ssid, int num_id) { - return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); + return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); } static struct idset *idset_new(int num_ssid, int num_id) diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index bcf20f3aa51b..46ec25632e8b 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale) goto out; scmdrv = to_scm_drv(scmdev->dev.driver); if (changed && scmdrv->notify) - scmdrv->notify(scmdev); + scmdrv->notify(scmdev, SCM_CHANGE); out: device_unlock(&scmdev->dev); if (changed) @@ -297,6 +297,22 @@ int scm_update_information(void) return ret; } +static int scm_dev_avail(struct device *dev, void *unused) +{ + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + struct scm_device *scmdev = to_scm_dev(dev); + + if (dev->driver && scmdrv->notify) + scmdrv->notify(scmdev, SCM_AVAIL); + + return 0; +} + +int scm_process_availability_information(void) +{ + return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); +} + static int __init scm_init(void) { int ret; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d87961d4c0de..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -769,6 +769,7 @@ struct qeth_card { unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; + struct task_struct *recovery_task; spinlock_t ip_lock; struct list_head ip_list; struct list_head *ip_tbd_list; @@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; extern struct kmem_cache *qeth_core_header_cache; extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; +void qeth_set_recovery_task(struct qeth_card *); +void qeth_clear_recovery_task(struct qeth_card *); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); int qeth_threads_running(struct qeth_card *, unsigned long); int qeth_wait_for_threads(struct qeth_card *, unsigned long); @@ -916,6 +919,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, void *reply_param); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); +int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int, int, int); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, diff --git a/drivers/s390/net/qeth_core_main.c 
b/drivers/s390/net/qeth_core_main.c index 0d8cdff81813..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "n/a"; } +void qeth_set_recovery_task(struct qeth_card *card) +{ + card->recovery_task = current; +} +EXPORT_SYMBOL_GPL(qeth_set_recovery_task); + +void qeth_clear_recovery_task(struct qeth_card *card) +{ + card->recovery_task = NULL; +} +EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); + +static bool qeth_is_recovery_task(const struct qeth_card *card) +{ + return card->recovery_task == current; +} + void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { @@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { + if (qeth_is_recovery_task(card)) + return 0; return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } @@ -3679,6 +3698,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(qeth_get_priority_queue); +int qeth_get_elements_for_frags(struct sk_buff *skb) +{ + int cnt, length, e, elements = 0; + struct skb_frag_struct *frag; + char *data; + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + e = PFN_UP((unsigned long)data + length - 1) - + PFN_DOWN((unsigned long)data); + elements += e; + } + return elements; +} +EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); + int qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb, int elems) { @@ -3686,7 +3724,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - PFN_DOWN((unsigned long)skb->data); - elements_needed += skb_shinfo(skb)->nr_frags; + elements_needed += qeth_get_elements_for_frags(skb); + if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). 
Discarded.\n", @@ -3771,12 +3810,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { frag = &skb_shinfo(skb)->frags[cnt]; - buffer->element[element].addr = (char *) - page_to_phys(skb_frag_page(frag)) - + frag->page_offset; - buffer->element[element].length = frag->size; - buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; - element++; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + while (length > 0) { + length_here = PAGE_SIZE - + ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + length -= length_here; + data += length_here; + element++; + } } if (buffer->element[element - 1].eflags) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); __qeth_l2_set_offline(card->gdev, 1); rc = __qeth_l2_set_online(card->gdev, 1); if (!rc) @@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 091ca0efa1c5..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, return rc; } -static void qeth_l3_correct_routing_type(struct qeth_card *card, +static int qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) { if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, case PRIMARY_CONNECTOR: case SECONDARY_CONNECTOR: case MULTICAST_ROUTER: - return; + return 0; default: goto out_inval; } @@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, case NO_ROUTER: case PRIMARY_ROUTER: case SECONDARY_ROUTER: - return; + return 0; case MULTICAST_ROUTER: if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) - return; + return 0; default: goto out_inval; } } out_inval: *type = NO_ROUTER; + return -EINVAL; } int qeth_l3_setrouting_v4(struct qeth_card *card) @@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "setrtg4"); - qeth_l3_correct_routing_type(card, &card->options.route4.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, QETH_PROT_IPV4); + if (rc) + return rc; rc = qeth_l3_send_setrouting(card, card->options.route4.type, QETH_PROT_IPV4); @@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) if (!qeth_is_supported(card, IPA_IPV6)) return 0; - qeth_l3_correct_routing_type(card, &card->options.route6.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, QETH_PROT_IPV6); + if (rc) + return rc; rc = 
qeth_l3_send_setrouting(card, card->options.route6.type, QETH_PROT_IPV6); @@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) tcp_hdr(skb)->doff * 4; int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); - elements += skb_shinfo(skb)->nr_frags; + + elements += qeth_get_elements_for_frags(skb); + return elements; } @@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) rc = -ENODEV; goto out_remove; } - qeth_trace_features(card); if (!card->dev && qeth_l3_setup_netdev(card)) { rc = -ENODEV; @@ -3425,6 +3431,7 @@ contin: qeth_l3_set_multicast_list(card->dev); rtnl_unlock(); } + qeth_trace_features(card); /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); @@ -3508,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); __qeth_l3_set_offline(card->gdev, 1); rc = __qeth_l3_set_online(card->gdev, 1); if (!rc) @@ -3518,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index ebc379486267..e70af2406ff9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, rc = qeth_l3_setrouting_v6(card); } out: + if (rc) + route->type = old_route_type; mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1a9d1e3ce64c..c1441ed282eb 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void __init reset_one_i2c(struct bbc_i2c_bus *bp) +static void reset_one_i2c(struct bbc_i2c_bus *bp) { writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); writeb(bp->own, bp->i2c_control_regs + 0x1); @@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp) writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); } -static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) +static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index) { struct bbc_i2c_bus *bp; struct device_node *dp; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da434..90bc7bd00966 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) fc_exch_init(lport); fc_rport_init(lport); fc_disc_init(lport); + fc_disc_config(lport, lport); return 0; } @@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev, } ctlr = bnx2fc_to_ctlr(interface); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; interface->timer_work_queue = @@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev, goto ifput_err; } - lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); + lport = bnx2fc_if_create(interface, &cdev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); @@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev, /* Make this master N_port */ ctlr->lp = lport; - cdev = fcoe_ctlr_to_ctlr_dev(ctlr); - if (link_state == BNX2FC_CREATE_LINK_UP) cdev->enabled = FCOE_CTLR_ENABLED; else diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c70..9bfdc9a3f897 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) { struct net_device *netdev = fcoe->netdev; struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); rtnl_lock(); if (!fcoe->removed) @@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) /* tear-down the FCoE controller */ fcoe_ctlr_destroy(fip); scsi_host_put(fip->lp->host); - fcoe_ctlr_device_delete(ctlr_dev); dev_put(netdev); module_put(THIS_MODULE); } @@ -2194,6 +2192,8 @@ out_nodev: */ static void fcoe_destroy_work(struct work_struct *work) { + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; struct fcoe_port *port; struct fcoe_interface *fcoe; struct Scsi_Host *shost; @@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work) mutex_lock(&fcoe_config_mutex); fcoe = port->priv; + ctlr = fcoe_to_ctlr(fcoe); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + fcoe_if_destroy(port->lport); fcoe_interface_cleanup(fcoe); mutex_unlock(&fcoe_config_mutex); + + fcoe_ctlr_device_delete(cdev); } /** @@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, rc = -EIO; rtnl_unlock(); fcoe_interface_cleanup(fcoe); - goto out_nortnl; + mutex_unlock(&fcoe_config_mutex); + fcoe_ctlr_device_delete(ctlr_dev); + goto out; } /* Make this the "master" N_Port */ @@ 
-2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, out_nodev: rtnl_unlock(); -out_nortnl: mutex_unlock(&fcoe_config_mutex); +out: return rc; } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da2..a76247201be5 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2815,6 +2815,47 @@ unlock: } /** + * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode + * @lport: The local port to be (re)configured + * @fip: The FCoE controller whose mode is changing + * @fip_mode: The new fip mode + * + * Note that the we shouldn't be changing the libfc discovery settings + * (fc_disc_config) while an lport is going through the libfc state + * machine. The mode can only be changed when a fcoe_ctlr device is + * disabled, so that should ensure that this routine is only called + * when nothing is happening. + */ +void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, + enum fip_state fip_mode) +{ + void *priv; + + WARN_ON(lport->state != LPORT_ST_RESET && + lport->state != LPORT_ST_DISABLED); + + if (fip_mode == FIP_MODE_VN2VN) { + lport->rport_priv_size = sizeof(struct fcoe_rport); + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + priv = fip; + } else { + lport->rport_priv_size = 0; + lport->point_to_multipoint = 0; + lport->tt.disc_recv_req = NULL; + lport->tt.disc_start = NULL; + lport->tt.disc_stop = NULL; + lport->tt.disc_stop_final = NULL; + priv = lport; + } + + fc_disc_config(lport, priv); +} + +/** * fcoe_libfc_config() - Sets up libfc related properties for local port * @lport: The local port to configure libfc for * @fip: The FCoE controller in use by the local port @@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) - lport->rport_priv_size = sizeof(struct fcoe_rport); fc_rport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) { - lport->point_to_multipoint = 1; - lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; - lport->tt.disc_start = fcoe_ctlr_disc_start; - lport->tt.disc_stop = fcoe_ctlr_disc_stop; - lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; - mutex_init(&lport->disc.disc_mutex); - INIT_LIST_HEAD(&lport->disc.rports); - lport->disc.priv = fip; - } else { - fc_disc_init(lport); - } + fc_disc_init(lport); + fcoe_ctlr_mode_set(lport, fip, fip->mode); return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); @@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fc_lport *lport = ctlr->lp; mutex_lock(&ctlr->ctlr_mutex); switch (ctlr_dev->mode) { @@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) } mutex_unlock(&ctlr->ctlr_mutex); + + fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); } EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b9..d0fa4b6c551f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) sdev->allow_restart = 1; blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } - 
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); + scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); return 0; } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a1060..2197b57fb225 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) ipr_trace; } - list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; @@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + else + rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); if (rc) { dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); return rc; @@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - free_irq(pdev->irq, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); + else + free_irq(pdev->irq, ioa_cfg); LEAVE; @@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); flush_work(&ioa_cfg->work_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); spin_lock(&ipr_driver_lock); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557c..880a9068ca12 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) } /** - * fc_disc_init() - Initialize the discovery layer for a local port - * @lport: The local port that needs the discovery layer to be initialized + * fc_disc_config() - Configure the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be configured + * @priv: Private data structre for users of the discovery layer */ -int fc_disc_init(struct fc_lport *lport) +void fc_disc_config(struct fc_lport *lport, void *priv) { - struct fc_disc *disc; + struct fc_disc *disc = &lport->disc; if (!lport->tt.disc_start) lport->tt.disc_start = fc_disc_start; @@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) lport->tt.disc_recv_req = fc_disc_recv_req; disc = &lport->disc; + + disc->priv = priv; +} +EXPORT_SYMBOL(fc_disc_config); + +/** + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized + */ +void fc_disc_init(struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); mutex_init(&disc->disc_mutex); INIT_LIST_HEAD(&disc->rports); - - disc->priv = lport; - - return 0; } EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da5016..55cbd0180159 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -235,6 +235,17 @@ static void 
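The libfc change above splits one-time initialization (fc_disc_init(): the mutex, the rport list, the delayed work) from re-runnable configuration (fc_disc_config(): the default callbacks and the priv cookie), which is what allows fcoe_ctlr_mode_set() a little earlier in this series to switch a controller between fabric and VN2VN discovery without re-initializing anything. A small user-space sketch of that init-once / configure-many split; all names here are invented for illustration:

#include <stdio.h>

struct disc {
        void (*start)(void *priv);   /* re-configurable callback */
        void *priv;                  /* re-configurable cookie */
        int initialized;             /* one-time state */
};

static void fabric_start(void *priv) { printf("fabric start %p\n", priv); }
static void vn2vn_start(void *priv)  { printf("vn2vn start %p\n", priv); }

/* One-time setup: state that must never be re-initialized. */
static void disc_init(struct disc *d)
{
        d->initialized = 1;
}

/* Safe to call again later, e.g. when the mode changes. */
static void disc_config(struct disc *d, void (*start)(void *), void *priv)
{
        d->start = start;
        d->priv = priv;
}

int main(void)
{
        struct disc d = { 0 };

        disc_init(&d);
        disc_config(&d, fabric_start, &d);   /* default mode */
        d.start(d.priv);
        disc_config(&d, vn2vn_start, &d);    /* switched later, no re-init */
        d.start(d.priv);
        return 0;
}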
sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + /* Handle vacant phy - rest of dr data is not valid so skip it */ + if (phy->phy_state == PHY_VACANT) { + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + phy->attached_dev_type = NO_DEVICE; + if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { + phy->phy_id = phy_id; + goto skip; + } else + goto out; + } + phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; @@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); @@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) if (!disc_req) return -ENOMEM; - disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e952..d43faf34c1e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_drqe; struct lpfc_register doorbell; - int put_index = hq->host_index; + int put_index; /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; + put_index = hq->host_index; temp_hrqe = hq->qe[hq->host_index].rqe; temp_drqe = dq->qe[dq->host_index].rqe; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1eb..b3db9dcc2619 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - /* No pending activities shall be there on the vha now */ - if (ql2xextended_error_logging & ql_dbg_user) - msleep(random32()%10); /* Just to see if something falls on - * the net we have placed below */ - BUG_ON(atomic_read(&vha->vref_count)); qla2x00_free_fcports(vha); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32a..fbc305f1c87c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,6 +15,7 @@ * | Mailbox commands | 0x115b | 0x111a-0x111b | * | | | 0x112c-0x112e | * | | | 0x113a | + * | | | 0x1155-0x1158 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | * | | | 0x2016 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | @@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, void *ring; } aq, *aqp; - if (!ha->tgt.atio_q_length) + if (!ha->tgt.atio_ring) return ptr; num_queues = 1; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772b..65c5ff75936b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -863,7 +863,6 @@ typedef struct { #define MBX_1 BIT_1 #define MBX_0 BIT_0 -#define RNID_TYPE_SET_VERSION 0x9 #define RNID_TYPE_ASIC_TEMP 0xC /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f17..b310fa97b545 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -358,9 +358,6 @@ extern int qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 
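The one-line lpfc_sli4_rq_put() change above is worth spelling out: the old code read hq->host_index in the initializer, before the unlikely(!hq) sanity check, so the NULL check could never protect anything. Moving the assignment below the check removes the dereference-before-check. A tiny illustrative sketch of the two shapes; the struct is made up for the example:

#include <stddef.h>

struct queue { int host_index; };

/* Buggy shape: q is dereferenced in the initializer, before the check. */
static int put_bad(struct queue *q)
{
        int idx = q->host_index;        /* crashes if q == NULL */

        if (q == NULL)
                return -1;              /* too late to matter */
        return idx;
}

/* Fixed shape, as in the patch: check first, then read. */
static int put_good(struct queue *q)
{
        int idx;

        if (q == NULL)
                return -1;
        idx = q->host_index;
        return idx;
}

int main(void)
{
        struct queue q = { .host_index = 7 };

        return put_good(&q) == put_bad(&q) ? 0 : 1;
}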
extern int -qla2x00_set_driver_version(scsi_qla_host_t *, char *); - -extern int qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, uint16_t, uint16_t, uint16_t, uint16_t); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a1335..b59203393cb2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) qla24xx_read_fcp_prio_cfg(vha); - qla2x00_set_driver_version(vha, QLA2XXX_VERSION); - return (rval); } @@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mq_size += ha->max_rsp_queues * (rsp->length * sizeof(response_t)); } - if (ha->tgt.atio_q_length) + if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); /* Allocate memory for Fibre Channel Event Buffer. */ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4fa..43345af56431 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) return rval; } -int -qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - int len; - uint16_t dwlen; - uint8_t *str; - dma_addr_t str_dma; - struct qla_hw_data *ha = vha->hw; - - if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) - return QLA_FUNCTION_FAILED; - - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, - "Entered %s.\n", __func__); - - str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); - if (!str) { - ql_log(ql_log_warn, vha, 0x1156, - "Failed to allocate driver version param.\n"); - return QLA_MEMORY_ALLOC_FAILED; - } - - memcpy(str, "\x7\x3\x11\x0", 4); - dwlen = str[0]; - len = dwlen * sizeof(uint32_t) - 4; - memset(str + 4, 0, len); - if (len > strlen(version)) - len = strlen(version); - memcpy(str + 4, version, len); - - mcp->mb[0] = MBC_SET_RNID_PARAMS; - mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; - mcp->mb[2] = MSW(LSD(str_dma)); - mcp->mb[3] = LSW(LSD(str_dma)); - mcp->mb[6] = MSW(MSD(str_dma)); - mcp->mb[7] = LSW(MSD(str_dma)); - mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - - if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x1157, - "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); - } else { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, - "Done %s.\n", __func__); - } - - dma_pool_free(ha->s_dma_pool, str, str_dma); - - return rval; -} - static int qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) { diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e33..ec54036d1e12 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.04.00.08-k" +#define QLA2XXX_VERSION "8.04.00.13-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 4 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af68..2a32036a9404 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev) tpnt->disk = disk; disk->private_data = &tpnt->driver; disk->queue = SDp->request_queue; + /* SCSI tape doesn't register this gendisk via add_disk(). 
Manually + * take queue reference that release_disk() expects. */ + if (!blk_get_queue(disk->queue)) + goto out_put_disk; tpnt->driver = &st_template; tpnt->device = SDp; @@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev) idr_preload_end(); if (error < 0) { pr_warn("st: idr allocation failed: %d\n", error); - goto out_put_disk; + goto out_put_queue; } tpnt->index = error; sprintf(disk->disk_name, "st%d", tpnt->index); @@ -4211,6 +4215,8 @@ out_remove_devs: spin_lock(&st_index_lock); idr_remove(&st_index_idr, tpnt->index); spin_unlock(&st_index_lock); +out_put_queue: + blk_put_queue(disk->queue); out_put_disk: put_disk(disk); kfree(tpnt); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a311..2be0de920d67 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers" config SPI_ALTERA tristate "Altera SPI Controller" + depends on GENERIC_HARDIRQS select SPI_BITBANG help This is the driver for the Altera SPI Controller. @@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA config SPI_PXA2XX tristate "PXA2xx SSP SPI master" - depends on ARCH_PXA || PCI || ACPI + depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS select PXA_SSP if ARCH_PXA help This enables using a PXA2xx or Sodaville SSP port as a SPI master diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a77..d7df435d962e 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, static int bcm63xx_spi_setup(struct spi_device *spi) { struct bcm63xx_spi *bs; - int ret; bs = spi_master_get_devdata(spi->master); @@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) default: dev_err(dev, "unsupported MSG_CTL width: %d\n", bs->msg_ctl_width); - goto out_clk_disable; + goto out_err; } /* Initialize hardware */ diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d74..3e490ee7f275 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, for (i = count; i > 0; i--) { data = tx_buf ? 
*tx_buf++ : 0; - if (len == EOFBYTE) + if (len == EOFBYTE && t->cs_change) setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); out_8(&fifo->txdata_8, data); len--; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a6..810413883c79 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; - ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853aa..4188b2faac5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) { struct s3c64xx_spi_driver_data *sdd = data; struct spi_master *spi = sdd->master; - unsigned int val; + unsigned int val, clr = 0; - val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); + val = readl(sdd->regs + S3C64XX_SPI_STATUS); - val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | - S3C64XX_SPI_PND_RX_UNDERRUN_CLR | - S3C64XX_SPI_PND_TX_OVERRUN_CLR | - S3C64XX_SPI_PND_TX_UNDERRUN_CLR; - - writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); - - if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) + if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { + clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; dev_err(&spi->dev, "RX overrun\n"); - if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { + clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; dev_err(&spi->dev, "RX underrun\n"); - if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { + clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; dev_err(&spi->dev, "TX overrun\n"); - if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) + } + if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { + clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; dev_err(&spi->dev, "TX underrun\n"); + } + + /* Clear the pending irq by setting and then clearing it */ + writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); + writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); return IRQ_HANDLED; } @@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) writel(0, regs + S3C64XX_SPI_MODE_CFG); writel(0, regs + S3C64XX_SPI_PACKET_CNT); - /* Clear any irq pending bits */ - writel(readl(regs + S3C64XX_SPI_PENDING_CLR), - regs + S3C64XX_SPI_PENDING_CLR); + /* Clear any irq pending bits, should set and clear the bits */ + val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | + S3C64XX_SPI_PND_RX_UNDERRUN_CLR | + S3C64XX_SPI_PND_TX_OVERRUN_CLR | + S3C64XX_SPI_PND_TX_UNDERRUN_CLR; + writel(val, regs + S3C64XX_SPI_PENDING_CLR); + writel(0, regs + S3C64XX_SPI_PENDING_CLR); writel(0, regs + S3C64XX_SPI_SWAP_CFG); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef3..a829563f4713 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi) return 0; } -static int tegra_slink_prepare_transfer(struct spi_master *master) -{ - struct tegra_slink_data *tspi = spi_master_get_devdata(master); - - return pm_runtime_get_sync(tspi->dev); -} - -static int tegra_slink_unprepare_transfer(struct spi_master *master) -{ - struct tegra_slink_data *tspi = spi_master_get_devdata(master); - - pm_runtime_put(tspi->dev); - return 0; -} - static int 
tegra_slink_transfer_one_message(struct spi_master *master, struct spi_message *msg) { @@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, msg->status = 0; msg->actual_length = 0; + ret = pm_runtime_get_sync(tspi->dev); + if (ret < 0) { + dev_err(tspi->dev, "runtime get failed: %d\n", ret); + goto done; + } + single_xfer = list_is_singular(&msg->transfers); list_for_each_entry(xfer, &msg->transfers, transfer_list) { INIT_COMPLETION(tspi->xfer_completion); @@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, exit: tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); + pm_runtime_put(tspi->dev); +done: msg->status = ret; spi_finalize_current_message(master); return ret; @@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev) /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tegra_slink_setup; - master->prepare_transfer_hardware = tegra_slink_prepare_transfer; master->transfer_one_message = tegra_slink_transfer_one_message; - master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; master->num_chipselect = MAX_CHIP_SELECT; master->bus_num = -1; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8c..004b10f184d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work) /* Lock queue and check for queue work */ spin_lock_irqsave(&master->queue_lock, flags); if (list_empty(&master->queue) || !master->running) { - if (master->busy && master->unprepare_transfer_hardware) { - ret = master->unprepare_transfer_hardware(master); - if (ret) { - spin_unlock_irqrestore(&master->queue_lock, flags); - dev_err(&master->dev, - "failed to unprepare transfer hardware\n"); - return; - } + if (!master->busy) { + spin_unlock_irqrestore(&master->queue_lock, flags); + return; } master->busy = false; spin_unlock_irqrestore(&master->queue_lock, flags); + if (master->unprepare_transfer_hardware && + master->unprepare_transfer_hardware(master)) + dev_err(&master->dev, + "failed to unprepare transfer hardware\n"); return; } @@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master) acpi_status status; acpi_handle handle; - handle = ACPI_HANDLE(&master->dev); + handle = ACPI_HANDLE(master->dev.parent); if (!handle) return; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 4c0f6d883dd3..7b0bce936762 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) return 0; } } + +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) +{ + u32 pmu_ctl = 0; + + switch (cc->dev->bus->chip_id) { + case 0x4322: + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); + if (spuravoid == 1) + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); + else + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; + break; + case 43222: + /* TODO: BCM43222 requires updating PLLs too */ + return; + default: + ssb_printk(KERN_ERR PFX + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", + cc->dev->bus->chip_id); + return; 
+ } + + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); +} +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index 192cf088f834..57b451904791 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -947,12 +947,13 @@ static int dt9812_di_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; + unsigned int channel = CR_CHAN(insn->chanspec); int n; u8 bits = 0; dt9812_digital_in(devpriv->slot, &bits); for (n = 0; n < insn->n; n++) - data[n] = ((1 << insn->chanspec) & bits) != 0; + data[n] = ((1 << channel) & bits) != 0; return n; } @@ -961,12 +962,13 @@ static int dt9812_do_winsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; + unsigned int channel = CR_CHAN(insn->chanspec); int n; u8 bits = 0; dt9812_digital_out_shadow(devpriv->slot, &bits); for (n = 0; n < insn->n; n++) { - u8 mask = 1 << insn->chanspec; + u8 mask = 1 << channel; bits &= ~mask; if (data[n]) @@ -981,13 +983,13 @@ static int dt9812_ai_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; + unsigned int channel = CR_CHAN(insn->chanspec); int n; for (n = 0; n < insn->n; n++) { u16 value = 0; - dt9812_analog_in(devpriv->slot, insn->chanspec, &value, - DT9812_GAIN_1); + dt9812_analog_in(devpriv->slot, channel, &value, DT9812_GAIN_1); data[n] = value; } return n; @@ -998,12 +1000,13 @@ static int dt9812_ao_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; + unsigned int channel = CR_CHAN(insn->chanspec); int n; u16 value; for (n = 0; n < insn->n; n++) { value = 0; - dt9812_analog_out_shadow(devpriv->slot, insn->chanspec, &value); + dt9812_analog_out_shadow(devpriv->slot, channel, &value); data[n] = value; } return n; @@ -1014,10 +1017,11 @@ static int dt9812_ao_winsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; + unsigned int channel = CR_CHAN(insn->chanspec); int n; for (n = 0; n < insn->n; n++) - dt9812_analog_out(devpriv->slot, insn->chanspec, data[n]); + dt9812_analog_out(devpriv->slot, channel, data[n]); return n; } diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 81a1fe661579..71a73ec5af8d 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) case TRIG_NONE: /* continous acquisition */ devpriv->ai_continous = 1; - devpriv->ai_sample_count = 0; + devpriv->ai_sample_count = 1; break; } diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 1a0062a04456..6aac1f60bc42 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -730,10 +730,14 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb) static int usbduxsub_start(struct usbduxsub *usbduxsub) { int errcode = 0; - uint8_t local_transfer_buffer[16]; + uint8_t *local_transfer_buffer; + + local_transfer_buffer = kmalloc(1, GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to zero */ - local_transfer_buffer[0] = 0; + *local_transfer_buffer = 0; errcode = usb_control_msg(usbduxsub->usbdev, /* create a pipe for a control transfer */ usb_sndctrlpipe(usbduxsub->usbdev, 0), @@ -751,22 +755,25 @@ static int 
usbduxsub_start(struct usbduxsub *usbduxsub) 1, /* Timeout */ BULK_TIMEOUT); - if (errcode < 0) { + if (errcode < 0) dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (start)\n"); - return errcode; - } - return 0; + + kfree(local_transfer_buffer); + return errcode; } static int usbduxsub_stop(struct usbduxsub *usbduxsub) { int errcode = 0; + uint8_t *local_transfer_buffer; - uint8_t local_transfer_buffer[16]; + local_transfer_buffer = kmalloc(1, GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to one */ - local_transfer_buffer[0] = 1; + *local_transfer_buffer = 1; errcode = usb_control_msg(usbduxsub->usbdev, usb_sndctrlpipe(usbduxsub->usbdev, 0), /* bRequest, "Firmware" */ @@ -781,12 +788,12 @@ static int usbduxsub_stop(struct usbduxsub *usbduxsub) 1, /* Timeout */ BULK_TIMEOUT); - if (errcode < 0) { + if (errcode < 0) dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (stop)\n"); - return errcode; - } - return 0; + + kfree(local_transfer_buffer); + return errcode; } static int usbduxsub_upload(struct usbduxsub *usbduxsub, diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 4bf5dd094dc9..1ba0e3df492d 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -436,10 +436,14 @@ static void usbduxfastsub_ai_Irq(struct urb *urb) static int usbduxfastsub_start(struct usbduxfastsub_s *udfs) { int ret; - unsigned char local_transfer_buffer[16]; + unsigned char *local_transfer_buffer; + + local_transfer_buffer = kmalloc(1, GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to zero */ - local_transfer_buffer[0] = 0; + *local_transfer_buffer = 0; /* bRequest, "Firmware" */ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, @@ -450,22 +454,25 @@ static int usbduxfastsub_start(struct usbduxfastsub_s *udfs) local_transfer_buffer, 1, /* Length */ EZTIMEOUT); /* Timeout */ - if (ret < 0) { + if (ret < 0) dev_err(&udfs->interface->dev, "control msg failed (start)\n"); - return ret; - } - return 0; + kfree(local_transfer_buffer); + return ret; } static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs) { int ret; - unsigned char local_transfer_buffer[16]; + unsigned char *local_transfer_buffer; + + local_transfer_buffer = kmalloc(1, GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to one */ - local_transfer_buffer[0] = 1; + *local_transfer_buffer = 1; /* bRequest, "Firmware" */ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, @@ -474,13 +481,12 @@ static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs) 0x0000, /* Index */ local_transfer_buffer, 1, /* Length */ EZTIMEOUT); /* Timeout */ - if (ret < 0) { + if (ret < 0) dev_err(&udfs->interface->dev, "control msg failed (stop)\n"); - return ret; - } - return 0; + kfree(local_transfer_buffer); + return ret; } static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs, diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index d066351a71b2..a728c8fc32a2 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -681,7 +681,11 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb) static int usbduxsub_start(struct usbduxsub *usbduxsub) { int errcode = 0; - uint8_t local_transfer_buffer[16]; + uint8_t *local_transfer_buffer; + + local_transfer_buffer = kmalloc(16, 
GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to zero */ local_transfer_buffer[0] = 0; @@ -702,19 +706,22 @@ static int usbduxsub_start(struct usbduxsub *usbduxsub) 1, /* Timeout */ BULK_TIMEOUT); - if (errcode < 0) { + if (errcode < 0) dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (start)\n"); - return errcode; - } - return 0; + + kfree(local_transfer_buffer); + return errcode; } static int usbduxsub_stop(struct usbduxsub *usbduxsub) { int errcode = 0; + uint8_t *local_transfer_buffer; - uint8_t local_transfer_buffer[16]; + local_transfer_buffer = kmalloc(16, GFP_KERNEL); + if (!local_transfer_buffer) + return -ENOMEM; /* 7f92 to one */ local_transfer_buffer[0] = 1; @@ -732,12 +739,12 @@ static int usbduxsub_stop(struct usbduxsub *usbduxsub) 1, /* Timeout */ BULK_TIMEOUT); - if (errcode < 0) { + if (errcode < 0) dev_err(&usbduxsub->interface->dev, "comedi_: control msg failed (stop)\n"); - return errcode; - } - return 0; + + kfree(local_transfer_buffer); + return errcode; } static int usbduxsub_upload(struct usbduxsub *usbduxsub, diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c index 4b3a019409b5..b028b0d1317b 100644 --- a/drivers/staging/imx-drm/ipuv3-crtc.c +++ b/drivers/staging/imx-drm/ipuv3-crtc.c @@ -483,17 +483,6 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc, goto err_out; } - ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, - IPU_IRQ_EOF); - ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, - "imx_drm", ipu_crtc); - if (ret < 0) { - dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); - goto err_out; - } - - disable_irq(ipu_crtc->irq); - return 0; err_out: ipu_put_resources(ipu_crtc); @@ -504,6 +493,7 @@ err_out: static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, struct ipu_client_platformdata *pdata) { + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ret = ipu_get_resources(ipu_crtc, pdata); @@ -522,6 +512,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, goto err_put_resources; } + ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, + IPU_IRQ_EOF); + ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, + "imx_drm", ipu_crtc); + if (ret < 0) { + dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); + goto err_put_resources; + } + + disable_irq(ipu_crtc->irq); + return 0; err_put_resources: diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c index db1da28cecba..be26917a6896 100644 --- a/drivers/staging/tidspbridge/rmgr/drv.c +++ b/drivers/staging/tidspbridge/rmgr/drv.c @@ -76,37 +76,28 @@ int drv_insert_node_res_element(void *hnode, void *node_resource, struct node_res_object **node_res_obj = (struct node_res_object **)node_resource; struct process_context *ctxt = (struct process_context *)process_ctxt; - int status = 0; int retval; *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL); - if (!*node_res_obj) { - status = -ENOMEM; - goto func_end; - } + if (!*node_res_obj) + return -ENOMEM; (*node_res_obj)->node = hnode; - retval = idr_get_new(ctxt->node_id, *node_res_obj, - &(*node_res_obj)->id); - if (retval == -EAGAIN) { - if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) { - pr_err("%s: OUT OF MEMORY\n", __func__); - status = -ENOMEM; - goto func_end; - } - - retval = idr_get_new(ctxt->node_id, *node_res_obj, - &(*node_res_obj)->id); + retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL); + if (retval >= 0) { + 
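A note on the usbdux, usbduxfast and usbduxsigma start/stop helpers above: the on-stack local_transfer_buffer[16] arrays become kmalloc()'d buffers, because data handed to usb_control_msg() is used as a transfer buffer for DMA and on-stack memory is not a safe DMA target; the rewritten helpers also route both the success and the error path through a single kfree() before returning the status. A small user-space sketch of that allocate/use/free-once shape, where send_cmd() is a made-up stand-in for the control transfer:

#include <stdio.h>
#include <stdlib.h>

/* Made-up stand-in for usb_control_msg(): pretend it sends one byte. */
static int send_cmd(const unsigned char *buf, size_t len)
{
        return (buf && len) ? 0 : -1;
}

static int start_device(void)
{
        unsigned char *buf = malloc(1);   /* heap, not stack */
        int ret;

        if (!buf)
                return -1;

        *buf = 0;                         /* "7f92 to zero" in the driver */
        ret = send_cmd(buf, 1);
        if (ret < 0)
                fprintf(stderr, "control msg failed (start)\n");

        free(buf);                        /* one exit for both outcomes */
        return ret;
}

int main(void)
{
        return start_device();
}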
(*node_res_obj)->id = retval; + return 0; } - if (retval) { + + kfree(*node_res_obj); + + if (retval == -ENOSPC) { pr_err("%s: FAILED, IDR is FULL\n", __func__); - status = -EFAULT; + return -EFAULT; + } else { + pr_err("%s: OUT OF MEMORY\n", __func__); + return -ENOMEM; } -func_end: - if (status) - kfree(*node_res_obj); - - return status; } /* Release all Node resources and its context @@ -201,35 +192,26 @@ int drv_proc_insert_strm_res_element(void *stream_obj, struct strm_res_object **pstrm_res = (struct strm_res_object **)strm_res; struct process_context *ctxt = (struct process_context *)process_ctxt; - int status = 0; int retval; *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); - if (*pstrm_res == NULL) { - status = -EFAULT; - goto func_end; - } + if (*pstrm_res == NULL) + return -EFAULT; (*pstrm_res)->stream = stream_obj; - retval = idr_get_new(ctxt->stream_id, *pstrm_res, - &(*pstrm_res)->id); - if (retval == -EAGAIN) { - if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) { - pr_err("%s: OUT OF MEMORY\n", __func__); - status = -ENOMEM; - goto func_end; - } - - retval = idr_get_new(ctxt->stream_id, *pstrm_res, - &(*pstrm_res)->id); + retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL); + if (retval >= 0) { + (*pstrm_res)->id = retval; + return 0; } - if (retval) { + + if (retval == -ENOSPC) { pr_err("%s: FAILED, IDR is FULL\n", __func__); - status = -EPERM; + return -EPERM; + } else { + pr_err("%s: OUT OF MEMORY\n", __func__); + return -ENOMEM; } - -func_end: - return status; } static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt) diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 22918a106d73..d2479b766450 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -790,7 +790,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, WORD wBeaconInterval) if ((~uLowNextTBTT) < uLowRemain) qwTSF = ((qwTSF >> 32) + 1) << 32; - qwTSF = (qwTSF & 0xffffffff00000000UL) | + qwTSF = (qwTSF & 0xffffffff00000000ULL) | (u64)(uLowNextTBTT + uLowRemain); return (qwTSF); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index d5f53e1a74a2..a5063a6f64d9 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -669,8 +669,6 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message) if (device->flags & DEVICE_FLAGS_OPENED) device_close(device->dev); - usb_put_dev(interface_to_usbdev(intf)); - return 0; } @@ -681,8 +679,6 @@ static int vt6656_resume(struct usb_interface *intf) if (!device || !device->dev) return -ENODEV; - usb_get_dev(interface_to_usbdev(intf)); - if (!(device->flags & DEVICE_FLAGS_OPENED)) device_open(device->dev); diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig index 73582705e8c5..5c3714530961 100644 --- a/drivers/staging/zcache/Kconfig +++ b/drivers/staging/zcache/Kconfig @@ -15,7 +15,7 @@ config RAMSTER depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y depends on NET # must ensure struct page is 8-byte aligned - select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT + select HAVE_ALIGNED_STRUCT_PAGE if !64BIT default n help RAMster allows RAM on other machines in a cluster to be utilized diff --git a/drivers/staging/zcache/ramster/tcp.c b/drivers/staging/zcache/ramster/tcp.c index aa2a1a763aa4..f6e1e5209d88 100644 --- a/drivers/staging/zcache/ramster/tcp.c +++ b/drivers/staging/zcache/ramster/tcp.c @@ -300,27 +300,22 @@ static u8 r2net_num_from_nn(struct r2net_node *nn) static int 
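The idr conversions in this stretch, drv_insert_node_res_element() and drv_proc_insert_strm_res_element() above and r2net_prep_nsw() just below, all follow the same recipe: the old idr_pre_get()/idr_get_new() retry loop collapses into one idr_alloc() call, which returns either the new id (>= 0) or a negative errno such as -ENOMEM or -ENOSPC. A kernel-style sketch of the new shape, kernel build context assumed and the surrounding names invented for illustration:

#include <linux/gfp.h>
#include <linux/idr.h>

/* my_idr, obj and out_id are illustrative, not taken from the patch. */
static int register_object(struct idr *my_idr, void *obj, int *out_id)
{
        int id;

        /* One call replaces the old pre-get/get-new retry loop;
         * start = 0, end = 0 means "any available id". */
        id = idr_alloc(my_idr, obj, 0, 0, GFP_KERNEL);
        if (id < 0)
                return id;      /* -ENOMEM, or -ENOSPC if the space is full */

        *out_id = id;
        return 0;
}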
r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw) { - int ret = 0; + int ret; - do { - if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { - ret = -EAGAIN; - break; - } - spin_lock(&nn->nn_lock); - ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); - if (ret == 0) - list_add_tail(&nsw->ns_node_item, - &nn->nn_status_list); - spin_unlock(&nn->nn_lock); - } while (ret == -EAGAIN); + spin_lock(&nn->nn_lock); + ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC); + if (ret >= 0) { + nsw->ns_id = ret; + list_add_tail(&nsw->ns_node_item, &nn->nn_status_list); + } + spin_unlock(&nn->nn_lock); - if (ret == 0) { + if (ret >= 0) { init_waitqueue_head(&nsw->ns_wq); nsw->ns_sys_status = R2NET_ERR_NONE; nsw->ns_status = 0; + return 0; } - return ret; } diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index db0cf7c8adde..a0fc7b9eea65 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -166,6 +166,7 @@ static int chap_server_compute_md5( { char *endptr; unsigned long id; + unsigned char id_as_uchar; unsigned char digest[MD5_SIGNATURE_SIZE]; unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; unsigned char identifier[10], *challenge = NULL; @@ -355,7 +356,9 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, &id, 1); + /* To handle both endiannesses */ + id_as_uchar = id; + sg_init_one(&sg, &id_as_uchar, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index ff1c5ee352cb..cbe48ab41745 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -409,6 +409,7 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -491,6 +493,7 @@ static inline int core_alua_state_transition( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index bc02b018ae46..37ffc5bd2399 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -7,7 +7,7 @@ #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 1024 +#define FD_MAX_SECTORS 2048 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 82e78d72fdb6..e992b27aa090 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, page, len, off); - while (len > 0 && data_len > 0) { + /* + * We only have one page of data in each sg element, + * we can not cross a page boundary. 
+ */ + if (off + len > PAGE_SIZE) + goto fail; + + if (len > 0 && data_len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); @@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bio = NULL; } - len -= bytes; data_len -= bytes; - off = 0; } } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 290230de2c53..60d4b5185f32 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) - return TCM_UNSUPPORTED_SCSI_OPCODE; + if (!ops->execute_sync_cache) { + size = 0; + cmd->execute_cmd = sbc_emulate_noop; + break; + } /* * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 9169d6a5d7e4..aac9d2727e3c 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -711,7 +711,8 @@ int core_tpg_register( if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { - kfree(se_tpg); + array_free(se_tpg->tpg_lun_list, + TRANSPORT_MAX_LUNS_PER_TPG); return -ENOMEM; } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 2030b608136d..3243ea790eab 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1139,8 +1139,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) return ret; ret = target_check_reservation(cmd); - if (ret) + if (ret) { + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; return ret; + } ret = dev->transport->parse_cdb(cmd); if (ret) diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 7b0bfa0e7a9c..3078c403b42d 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c @@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->sensor = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->sensor) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(&pdev->dev, "Failed to get platform resource\n"); return -ENODEV; } - priv->control = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->control) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->control = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->control)) + return PTR_ERR(priv->control); ret = dove_init_sensor(priv); if (ret) { diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index e04ebd8671ac..46568c078dee 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c @@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) if (IS_ERR(th_zone->therm_dev)) { pr_err("Failed to register thermal zone device\n"); - ret = -EINVAL; + ret = PTR_ERR(th_zone->therm_dev); goto err_unregister; } th_zone->mode = THERMAL_DEVICE_ENABLED; diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index 65cb4f09e8f6..e5500edb5285 100644 --- 
a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c @@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->sensor = devm_request_and_ioremap(&pdev->dev, res); - if (!priv->sensor) { - dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); - return -EADDRNOTAVAIL; - } + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, priv, &ops, NULL, 0, 0); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 28f091994013..2cc5b6115e3e 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) struct device *dev = rcar_priv_to_dev(priv); int i; int ctemp, old, new; + int ret = -EINVAL; mutex_lock(&priv->lock); @@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) if (!ctemp) { dev_err(dev, "thermal sensor was broken\n"); - return -EINVAL; + goto err_out_unlock; } /* @@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); priv->ctemp = ctemp; - + ret = 0; +err_out_unlock: mutex_unlock(&priv->lock); - - return 0; + return ret; } static int rcar_thermal_get_temp(struct thermal_zone_device *zone, @@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) struct resource *res, *irq; int mres = 0; int i; + int ret = -ENODEV; int idle = IDLE_INTERVAL; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); @@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) /* * rcar_has_irq_support() will be enabled */ - common->base = devm_request_and_ioremap(dev, res); - if (!common->base) { - dev_err(dev, "Unable to ioremap thermal register\n"); - return -ENOMEM; - } + common->base = devm_ioremap_resource(dev, res); + if (IS_ERR(common->base)) + return PTR_ERR(common->base); /* enable temperature comparation */ rcar_thermal_common_write(common, ENR, 0x00030303); @@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) return -ENOMEM; } - priv->base = devm_request_and_ioremap(dev, res); - if (!priv->base) { - dev_err(dev, "Unable to ioremap priv register\n"); - return -ENOMEM; - } + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); priv->common = common; priv->id = i; @@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) idle); if (IS_ERR(priv->zone)) { dev_err(dev, "can't register thermal zone\n"); + ret = PTR_ERR(priv->zone); goto error_unregister; } @@ -460,7 +459,7 @@ error_unregister: rcar_thermal_for_each_priv(priv, common) thermal_zone_device_unregister(priv->zone); - return -ENODEV; + return ret; } static int rcar_thermal_remove(struct platform_device *pdev) diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b03..302909ccf183 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev, mxvar_sdriver, brd->idx + i, &pdev->dev); if (IS_ERR(tty_dev)) { retval = PTR_ERR(tty_dev); - for (i--; i >= 0; i--) + for (; i > 0; i--) tty_unregister_device(mxvar_sdriver, - brd->idx + i); + brd->idx + i - 1); goto err_relbrd; } } @@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void) tty_dev = 
tty_port_register_device(&brd->ports[i].port, mxvar_sdriver, brd->idx + i, NULL); if (IS_ERR(tty_dev)) { - for (i--; i >= 0; i--) + for (; i > 0; i--) tty_unregister_device(mxvar_sdriver, - brd->idx + i); + brd->idx + i - 1); for (i = 0; i < brd->info->nports; i++) tty_port_destroy(&brd->ports[i].port); free_irq(brd->irq, brd); diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250_core.c index 0efc815a4968..35f9c96aada9 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -301,7 +301,28 @@ static const struct serial8250_config uart_config[] = { }, [PORT_8250_CIR] = { .name = "CIR port" - } + }, + [PORT_ALTR_16550_F32] = { + .name = "Altera 16550 FIFO32", + .fifo_size = 32, + .tx_loadsz = 32, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_ALTR_16550_F64] = { + .name = "Altera 16550 FIFO64", + .fifo_size = 64, + .tx_loadsz = 64, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, + [PORT_ALTR_16550_F128] = { + .name = "Altera 16550 FIFO128", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .flags = UART_CAP_FIFO | UART_CAP_AFE, + }, }; /* Uart divisor latch read */ @@ -3396,3 +3417,34 @@ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); #endif MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); + +#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS +#ifndef MODULE +/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name + * working as well for the module options so we don't break people. We + * need to keep the names identical and the convenient macros will happily + * refuse to let us do that by failing the build with redefinition errors + * of global variables. So we stick them inside a dummy function to avoid + * those conflicts. The options still get parsed, and the redefined + * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive. + * + * This is hacky. I'm sorry. + */ +static void __used s8250_options(void) +{ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "8250_core." + + module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); + module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); + module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644); +#ifdef CONFIG_SERIAL_8250_RSA + __module_param_call(MODULE_PARAM_PREFIX, probe_rsa, + &param_array_ops, .arr = &__param_arr_probe_rsa, + 0444, -1); +#endif +} +#else +MODULE_ALIAS("8250_core"); +#endif +#endif diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 791c5a77ec61..26e3a97ab157 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv, #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d #define PCI_VENDOR_ID_WCH 0x4348 +#define PCI_DEVICE_ID_WCH_CH352_2S 0x3253 #define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 #define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 #define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 @@ -1571,6 +1572,7 @@ pci_wch_ch353_setup(struct serial_private *priv, /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 +#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 /* * Master list of serial port init/setup/exit quirks.
@@ -1852,15 +1854,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { }, { .vendor = PCI_VENDOR_ID_PLX, - .device = PCI_DEVICE_ID_PLX_9050, - .subvendor = PCI_VENDOR_ID_PLX, - .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584, - .init = pci_plx9050_init, - .setup = pci_default_setup, - .exit = pci_plx9050_exit, - }, - { - .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_ROMULUS, .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_ROMULUS, @@ -2180,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, + /* WCH CH352 2S card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, + .device = PCI_DEVICE_ID_WCH_CH352_2S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch353_setup, + }, /* * ASIX devices with FIFO bug */ @@ -3733,7 +3734,12 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0, - pbn_b0_4_115200 }, + pbn_b2_4_115200 }, + /* Unknown card - subdevice 0x1588 */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_VENDOR_ID_PLX, + PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0, + pbn_b2_8_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_KEYSPAN, PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, @@ -4791,6 +4797,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_VENDOR_ID_IBM, 0x0299, 0, 0, pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, + 0x1000, 0x0012, + 0, 0, pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, @@ -4869,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_bt_2_115200 }, + /* * Commtech, Inc. Fastcom adapters */ diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 2ef9537bcb2c..80fe91e64a52 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -33,6 +33,23 @@ config SERIAL_8250 Most people will say Y or M here, so that they can use serial mice, modems and similar devices connecting to the standard serial ports. +config SERIAL_8250_DEPRECATED_OPTIONS + bool "Support 8250_core.* kernel options (DEPRECATED)" + depends on SERIAL_8250 + default y + ---help--- + In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to + accept kernel parameters in both forms like 8250_core.nr_uarts=4 and + 8250.nr_uarts=4. We now renamed the module back to 8250, but if + anybody noticed in 3.7 and changed their userspace we still have to + keep the 8250_core.* options around until they revert the changes + they already did. + + If 8250 is built as a module, this adds 8250_core alias instead. + + If you did not notice yet and/or you have userspace from pre-3.7, it + is safe (and recommended) to say N here. + config SERIAL_8250_PNP bool "8250/16550 PNP device support" if EXPERT depends on SERIAL_8250 && PNP diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index a23838a4d535..36d68d054307 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -2,10 +2,10 @@ # Makefile for the 8250 serial device drivers.
# -obj-$(CONFIG_SERIAL_8250) += 8250_core.o -8250_core-y := 8250.o -8250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o -8250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o +obj-$(CONFIG_SERIAL_8250) += 8250.o +8250-y := 8250_core.o +8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o +8250-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index cf9210db9fa9..7e7006fd404e 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -211,14 +211,14 @@ config SERIAL_SAMSUNG config SERIAL_SAMSUNG_UARTS_4 bool depends on PLAT_SAMSUNG - default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442) + default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442) help Internal node for the common case of 4 Samsung compatible UARTs config SERIAL_SAMSUNG_UARTS int depends on PLAT_SAMSUNG - default 6 if ARCH_S5P6450 + default 6 if CPU_S5P6450 default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416 default 3 help diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d4a7c241b751..3467462869ce 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -158,7 +158,7 @@ struct atmel_uart_port { }; static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; -static unsigned long atmel_ports_in_use; +static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); #ifdef SUPPORT_SYSRQ static struct console atmel_console; @@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev) if (ret < 0) /* port id not found in platform data nor device-tree aliases: * auto-enumerate it */ - ret = find_first_zero_bit(&atmel_ports_in_use, - sizeof(atmel_ports_in_use)); + ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); - if (ret > ATMEL_MAX_UART) { + if (ret >= ATMEL_MAX_UART) { ret = -ENODEV; goto err; } - if (test_and_set_bit(ret, &atmel_ports_in_use)) { + if (test_and_set_bit(ret, atmel_ports_in_use)) { /* port already in use */ ret = -EBUSY; goto err; @@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev) /* "port" is allocated statically, so we shouldn't free it */ - clear_bit(port->line, &atmel_ports_in_use); + clear_bit(port->line, atmel_ports_in_use); clk_put(atmel_port->clk); diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 719594e5fc21..52a3ecd40421 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -235,7 +235,7 @@ static const char *bcm_uart_type(struct uart_port *port) */ static void bcm_uart_do_rx(struct uart_port *port) { - struct tty_port *port = &port->state->port; + struct tty_port *tty_port = &port->state->port; unsigned int max_count; /* limit number of char read in interrupt, should not be @@ -260,7 +260,7 @@ static void bcm_uart_do_rx(struct uart_port *port) bcm_uart_writel(port, val, UART_CTL_REG); port->icount.overrun++; - tty_insert_flip_char(port, 0, TTY_OVERRUN); + tty_insert_flip_char(tty_port, 0, TTY_OVERRUN); } if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) @@ -299,11 +299,11 @@ static void bcm_uart_do_rx(struct uart_port *port) if ((cstat & port->ignore_status_mask) == 0) - tty_insert_flip_char(port, c, flag); + tty_insert_flip_char(tty_port, c, flag); } while (--max_count); - tty_flip_buffer_push(port); + tty_flip_buffer_push(tty_port); } /* diff --git 
a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index c0e1fad51be7..018bad922554 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -550,7 +550,7 @@ static int mpc512x_psc_clock(struct uart_port *port, int enable) return 0; psc_num = (port->mapbase & 0xf00) >> 8; - snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num); + snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); psc_clk = clk_get(port->dev, clk_name); if (IS_ERR(psc_clk)) { dev_err(port->dev, "Failed to get PSC clock entry!\n"); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index d5874605682b..b025d5438275 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -241,6 +241,12 @@ static struct of_device_id of_platform_serial_table[] = { { .compatible = "ns16850", .data = (void *)PORT_16850, }, { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, }, { .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, }, + { .compatible = "altr,16550-FIFO32", + .data = (void *)PORT_ALTR_16550_F32, }, + { .compatible = "altr,16550-FIFO64", + .data = (void *)PORT_ALTR_16550_F64, }, + { .compatible = "altr,16550-FIFO128", + .data = (void *)PORT_ALTR_16550_F128, }, #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL { .compatible = "ibm,qpace-nwp-serial", .data = (void *)PORT_NWPSERIAL, }, diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb7..30d4f7a783cd 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); /* FIFO ENABLE, DMA MODE */ + up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; + /* + * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK + * sets Enables the granularity of 1 for TRIGGER RX + * level. Along with setting RX FIFO trigger level + * to 1 (as noted below, 16 characters) and TLR[3:0] + * to zero this will result RX FIFO threshold level + * to 1 character, instead of 16 as noted in comment + * below. + */ + /* Set receive FIFO threshold to 16 characters and * transmit FIFO threshold to 16 spaces */ diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index e343d6670854..451687cb9685 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = { #define UART_NR 4 static struct uart_sunsu_port sunsu_ports[UART_NR]; +static int nr_inst; /* Number of already registered ports */ #ifdef CONFIG_SERIO @@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options) printk("Console: ttyS%d (SU)\n", (sunsu_reg.minor - 64) + co->index); - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. 
- */ - if (co->index >= UART_NR) - co->index = 0; + if (co->index > nr_inst) + return -ENODEV; port = &sunsu_ports[co->index].port; /* @@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct device_node *dp) static int su_probe(struct platform_device *op) { - static int inst; struct device_node *dp = op->dev.of_node; struct uart_sunsu_port *up; struct resource *rp; @@ -1418,16 +1413,16 @@ static int su_probe(struct platform_device *op) type = su_get_type(dp); if (type == SU_PORT_PORT) { - if (inst >= UART_NR) + if (nr_inst >= UART_NR) return -EINVAL; - up = &sunsu_ports[inst]; + up = &sunsu_ports[nr_inst]; } else { up = kzalloc(sizeof(*up), GFP_KERNEL); if (!up) return -ENOMEM; } - up->port.line = inst; + up->port.line = nr_inst; spin_lock_init(&up->port.lock); @@ -1461,6 +1456,8 @@ static int su_probe(struct platform_device *op) } dev_set_drvdata(&op->dev, up); + nr_inst++; + return 0; } @@ -1488,7 +1485,7 @@ static int su_probe(struct platform_device *op) dev_set_drvdata(&op->dev, up); - inst++; + nr_inst++; return 0; diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index a3f9dd5c9dff..705240e6c4ec 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -611,14 +611,7 @@ static int vt8500_serial_probe(struct platform_device *pdev) vt8500_port->uart.dev = &pdev->dev; vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; - vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); - if (!IS_ERR(vt8500_port->clk)) { - vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); - } else { - /* use the default of 24Mhz if not specified and warn */ - pr_warn("%s: serial clock source not specified\n", __func__); - vt8500_port->uart.uartclk = 24000000; - } + vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); snprintf(vt8500_port->name, sizeof(vt8500_port->name), "VT8500 UART%d", pdev->id); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index ba451c7209fc..f36bbba1ac8b 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port) /* Receive Timeout register is enabled with value of 10 */ xuartps_writel(10, XUARTPS_RXTOUT_OFFSET); + /* Clear out any pending interrupts before enabling them */ + xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET); /* Set the Interrupt Registers with desired interrupts */ xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY | diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index bb119934e76c..578aa7594b11 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -425,7 +425,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_ldisc *disc; tty = port->itty; - if (WARN_RATELIMIT(tty == NULL, "tty is NULL\n")) + if (tty == NULL) return; disc = tty_ldisc_ref(tty); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 05400acbc456..b0452688308c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); +static void tty_update_time(struct timespec *time) +{ + unsigned long sec = get_seconds(); + sec -= sec % 60; + if ((long)(sec - time->tv_sec) > 0) + time->tv_sec = sec; +} + /** * tty_read - read method for tty device files * @file: pointer to tty file @@ -960,10 +968,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int i; + struct inode *inode = 
file_inode(file); struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; - if (tty_paranoia_check(tty, file_inode(file), "tty_read")) + if (tty_paranoia_check(tty, inode, "tty_read")) return -EIO; if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) return -EIO; @@ -977,6 +986,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, i = -EIO; tty_ldisc_deref(ld); + if (i > 0) + tty_update_time(&inode->i_atime); + return i; } @@ -1077,8 +1089,10 @@ static inline ssize_t do_tty_write( break; cond_resched(); } - if (written) + if (written) { + tty_update_time(&file_inode(file)->i_mtime); ret = written; + } out: tty_write_unlock(tty); return ret; diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index e4ca345873c3..d7799deacb21 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll) static struct vcs_poll_data * vcs_poll_data_get(struct file *file) { - struct vcs_poll_data *poll = file->private_data; + struct vcs_poll_data *poll = file->private_data, *kill = NULL; if (poll) return poll; @@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file) file->private_data = poll; } else { /* someone else raced ahead of us */ - vcs_poll_data_free(poll); + kill = poll; poll = file->private_data; } spin_unlock(&file->f_lock); + if (kill) + vcs_poll_data_free(kill); return poll; } diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index f5ed3d75fa5a..8f5ebced5df0 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/ obj-$(CONFIG_USB_SERIAL) += serial/ obj-$(CONFIG_USB) += misc/ -obj-$(CONFIG_USB_COMMON) += phy/ +obj-$(CONFIG_USB_OTG_UTILS) += phy/ obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/ obj-$(CONFIG_USB_ATM) += atm/ diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index a03fbc15fa9c..aa491627a45b 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -100,7 +100,7 @@ struct c67x00_urb_priv { #define TD_PIDEP_OFFSET 0x04 #define TD_PIDEPMASK_PID 0xF0 #define TD_PIDEPMASK_EP 0x0F -#define TD_PORTLENMASK_DL 0x02FF +#define TD_PORTLENMASK_DL 0x03FF #define TD_PORTLENMASK_PN 0xC000 #define TD_STATUS_OFFSET 0x07 @@ -590,7 +590,7 @@ static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb, { struct c67x00_td *td; struct c67x00_urb_priv *urbp = urb->hcpriv; - const __u8 active_flag = 1, retry_cnt = 1; + const __u8 active_flag = 1, retry_cnt = 3; __u8 cmd = 0; int tt = 0; diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 2f45bba8561d..f64fbea1cf20 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1767,7 +1767,7 @@ static int udc_start(struct ci13xxx *ci) goto put_transceiver; } - retval = dbg_create_files(&ci->gadget.dev); + retval = dbg_create_files(ci->dev); if (retval) goto unreg_device; @@ -1796,7 +1796,7 @@ remove_trans: dev_err(dev, "error = %i\n", retval); remove_dbg: - dbg_remove_files(&ci->gadget.dev); + dbg_remove_files(ci->dev); unreg_device: device_unregister(&ci->gadget.dev); put_transceiver: @@ -1836,7 +1836,7 @@ static void udc_stop(struct ci13xxx *ci) if (ci->global_phy) usb_put_phy(ci->transceiver); } - dbg_remove_files(&ci->gadget.dev); + dbg_remove_files(ci->dev); device_unregister(&ci->gadget.dev); /* my kobject is dynamic, I swear! 
*/ memset(&ci->gadget, 0, sizeof(ci->gadget)); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 8ac25adf31b4..387dc6c8ad25 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port) dev_dbg(&acm->control->dev, "%s\n", __func__); - tty_unregister_device(acm_tty_driver, acm->minor); acm_release_minor(acm); usb_put_intf(acm->control); kfree(acm->country_codes); @@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf, int num_rx_buf; int i; int combined_interfaces = 0; + struct device *tty_dev; + int rv = -ENOMEM; /* normal quirks */ quirks = (unsigned long)id->driver_info; @@ -1339,11 +1340,24 @@ skip_countries: usb_set_intfdata(data_interface, acm); usb_get_intf(control_interface); - tty_port_register_device(&acm->port, acm_tty_driver, minor, + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); + if (IS_ERR(tty_dev)) { + rv = PTR_ERR(tty_dev); + goto alloc_fail8; + } return 0; +alloc_fail8: + if (acm->country_codes) { + device_remove_file(&acm->control->dev, + &dev_attr_wCountryCodes); + device_remove_file(&acm->control->dev, + &dev_attr_iCountryCodeRelDate); + } + device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); alloc_fail7: + usb_set_intfdata(intf, NULL); for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); alloc_fail6: @@ -1359,7 +1373,7 @@ alloc_fail2: acm_release_minor(acm); kfree(acm); alloc_fail: - return -ENOMEM; + return rv; } static void stop_data_traffic(struct acm *acm) @@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf) stop_data_traffic(acm); + tty_unregister_device(acm_tty_driver, acm->minor); + usb_free_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 5f0cb417b736..122d056d96d5 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -56,6 +56,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_RESPONDING 7 #define WDM_SUSPENDING 8 #define WDM_RESETTING 9 +#define WDM_OVERFLOW 10 #define WDM_MAX 16 @@ -155,6 +156,7 @@ static void wdm_in_callback(struct urb *urb) { struct wdm_device *desc = urb->context; int status = urb->status; + int length = urb->actual_length; spin_lock(&desc->iuspin); clear_bit(WDM_RESPONDING, &desc->flags); @@ -185,9 +187,17 @@ static void wdm_in_callback(struct urb *urb) } desc->rerr = status; - desc->reslength = urb->actual_length; - memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); - desc->length += desc->reslength; + if (length + desc->length > desc->wMaxCommand) { + /* The buffer would overflow */ + set_bit(WDM_OVERFLOW, &desc->flags); + } else { + /* we may already be in overflow */ + if (!test_bit(WDM_OVERFLOW, &desc->flags)) { + memmove(desc->ubuf + desc->length, desc->inbuf, length); + desc->length += length; + desc->reslength = length; + } + } skip_error: wake_up(&desc->wait); @@ -435,6 +445,11 @@ retry: rv = -ENODEV; goto err; } + if (test_bit(WDM_OVERFLOW, &desc->flags)) { + clear_bit(WDM_OVERFLOW, &desc->flags); + rv = -ENOBUFS; + goto err; + } i++; if (file->f_flags & O_NONBLOCK) { if (!test_bit(WDM_READ, &desc->flags)) { @@ -478,6 +493,7 @@ retry: spin_unlock_irq(&desc->iuspin); goto retry; } + if (!desc->reslength) { /* zero length read */ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); clear_bit(WDM_READ, &desc->flags); @@ -1004,6 +1020,7 @@ static 
int wdm_post_reset(struct usb_interface *intf) struct wdm_device *desc = wdm_find_device(intf); int rv; + clear_bit(WDM_OVERFLOW, &desc->flags); clear_bit(WDM_RESETTING, &desc->flags); rv = recover_from_urb_loss(desc); mutex_unlock(&desc->wlock); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 622b4a48e732..2b487d4797bd 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) struct hc_driver *driver; struct usb_hcd *hcd; int retval; + int hcd_irq = 0; if (usb_disabled()) return -ENODEV; @@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; dev->current_state = PCI_D0; - /* The xHCI driver supports MSI and MSI-X, - * so don't fail if the BIOS doesn't provide a legacy IRQ. + /* + * The xHCI driver has its own irq management + * make sure irq setup is not touched for xhci in generic hcd code */ - if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { - dev_err(&dev->dev, - "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", - pci_name(dev)); - retval = -ENODEV; - goto disable_pci; + if ((driver->flags & HCD_MASK) != HCD_USB3) { + if (!dev->irq) { + dev_err(&dev->dev, + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", + pci_name(dev)); + retval = -ENODEV; + goto disable_pci; + } + hcd_irq = dev->irq; } hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); @@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); - retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); + retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); if (retval != 0) goto unmap_registers; set_hs_companion(dev, hcd); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 99b34a30354f..f9ec44cbb82f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd) } EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd); +int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1) +{ + if (!hcd->driver->find_raw_port_number) + return port1; + + return hcd->driver->find_raw_port_number(hcd, port1); +} + static int usb_hcd_request_irqs(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags) { diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) { struct usb_port *port_dev = to_usb_port(dev); - dev_pm_qos_hide_flags(dev); kfree(port_dev); } diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index b6f4bad3f756..255c14464bf2 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/usb/hcd.h> #include <acpi/acpi_bus.h> #include "usb.h" @@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) * connected to. 
*/ if (!udev->parent) { - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + int raw_port_num; + + raw_port_num = usb_hcd_find_raw_port_number(hcd, port_num); + *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), + raw_port_num); if (!*handle) return -ENODEV; } else { diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 999909451e37..ffa6b004a84b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -583,6 +583,7 @@ static int dwc3_remove(struct platform_device *pdev) break; } + dwc3_free_event_buffers(dwc); dwc3_core_exit(dwc); return 0; diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index b50da53e9a52..b082bec7343e 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -23,8 +23,6 @@ #include <linux/usb/nop-usb-xceiv.h> #include <linux/of.h> -#include "core.h" - struct dwc3_exynos { struct platform_device *dwc3; struct platform_device *usb2_phy; diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 22f337f57219..afa05e3c9cf4 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -54,8 +54,6 @@ #include <linux/usb/otg.h> #include <linux/usb/nop-usb-xceiv.h> -#include "core.h" - /* * All these registers belong to OMAP's Wrapper around the * DesignWare USB3 Core. @@ -465,20 +463,20 @@ static int dwc3_omap_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id of_dwc3_matach[] = { +static const struct of_device_id of_dwc3_match[] = { { "ti,dwc3", }, { }, }; -MODULE_DEVICE_TABLE(of, of_dwc3_matach); +MODULE_DEVICE_TABLE(of, of_dwc3_match); static struct platform_driver dwc3_omap_driver = { .probe = dwc3_omap_probe, .remove = dwc3_omap_remove, .driver = { .name = "omap-dwc3", - .of_match_table = of_dwc3_matach, + .of_match_table = of_dwc3_match, }, }; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7d70f44567d2..e8d77689a322 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -45,8 +45,6 @@ #include <linux/usb/otg.h> #include <linux/usb/nop-usb-xceiv.h> -#include "core.h" - /* FIXME define these in <linux/pci_ids.h> */ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index d7da073a23fe..1d139ca05ef1 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -891,7 +891,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, DWC3_TRBCTL_CONTROL_DATA); } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) && (dep->number == 0)) { - u32 transfer_size; + u32 transfer_size; + u32 maxpacket; ret = usb_gadget_map_request(&dwc->gadget, &req->request, dep->number); @@ -902,8 +903,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE); - transfer_size = roundup(req->request.length, - (u32) dep->endpoint.maxpacket); + maxpacket = dep->endpoint.maxpacket; + transfer_size = roundup(req->request.length, maxpacket); dwc->ep0_bounced = true; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a04342f6cbfa..82e160e96fca 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2159,7 +2159,6 @@ static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed) static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) { - struct dwc3_gadget_ep_cmd_params params; struct dwc3_ep *dep; int ret; u32 reg; @@ -2167,8 
+2166,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) dev_vdbg(dwc->dev, "%s\n", __func__); - memset(¶ms, 0x00, sizeof(params)); - reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 5a0c541daf89..c7525b1cad74 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -145,6 +145,7 @@ config USB_LPC32XX tristate "LPC32XX USB Peripheral Controller" depends on ARCH_LPC32XX select USB_ISP1301 + select USB_OTG_UTILS help This option selects the USB device controller in the LPC32xx SoC. diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index 97a13c349cc5..82fb22511356 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -35,6 +35,12 @@ mv_udc-y := mv_udc_core.o obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o +# USB Functions +obj-$(CONFIG_USB_F_ACM) += f_acm.o +f_ss_lb-y := f_loopback.o f_sourcesink.o +obj-$(CONFIG_USB_F_SS_LB) += f_ss_lb.o +obj-$(CONFIG_USB_U_SERIAL) += u_serial.o + # # USB gadget drivers # @@ -74,9 +80,3 @@ obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o obj-$(CONFIG_USB_G_NCM) += g_ncm.o obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o - -# USB Functions -obj-$(CONFIG_USB_F_ACM) += f_acm.o -f_ss_lb-y := f_loopback.o f_sourcesink.o -obj-$(CONFIG_USB_F_SS_LB) += f_ss_lb.o -obj-$(CONFIG_USB_U_SERIAL) += u_serial.o diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 7c821de8ce3d..c0d62b278610 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1757,10 +1757,7 @@ static const struct usb_gadget_driver composite_driver_template = { /** * usb_composite_probe() - register a composite driver * @driver: the driver to register - * @bind: the callback used to allocate resources that are shared across the - * whole device, such as string IDs, and add its configurations using - * @usb_add_config(). This may fail by returning a negative errno - * value; it should return zero on successful initialization. 
+ * * Context: single threaded during gadget setup * * This function is used to register drivers using the composite driver diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 71beeb833558..cc9c49c57c80 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; - struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status; /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->config, (u8 *) req->buf); if (status < 0) - ERROR(cdev, "RNDIS command error %d, %d/%d\n", + pr_err("RNDIS command error %d, %d/%d\n", status, req->actual, req->length); // spin_unlock(&dev->lock); } diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c index f570e667a640..fa8ea4ea00c1 100644 --- a/drivers/usb/gadget/f_uac1.c +++ b/drivers/usb/gadget/f_uac1.c @@ -418,6 +418,7 @@ static int audio_get_intf_req(struct usb_function *f, req->context = audio; req->complete = f_audio_complete; + len = min_t(size_t, sizeof(value), len); memcpy(req->buf, &value, len); return len; diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 3953dd4d7186..3b343b23e4b0 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c @@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev) goto error; gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; - for (i = func_num; --i; ) { + for (i = func_num; i--; ) { ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); if (unlikely(ret < 0)) { while (++i < func_num) @@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev) gether_cleanup(); gfs_ether_setup = false; - for (i = func_num; --i; ) + for (i = func_num; i--; ) if (ffs_tab[i].ffs_data) functionfs_unbind(ffs_tab[i].ffs_data); diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c index 8efd7555fa21..5bd930d779b9 100644 --- a/drivers/usb/gadget/imx_udc.c +++ b/drivers/usb/gadget/imx_udc.c @@ -1334,27 +1334,18 @@ static int imx_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct imx_udc_struct *imx_usb; - int retval; imx_usb = container_of(gadget, struct imx_udc_struct, gadget); /* first hook up the driver ... 
*/ imx_usb->driver = driver; imx_usb->gadget.dev.driver = &driver->driver; - retval = device_add(&imx_usb->gadget.dev); - if (retval) - goto fail; - D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n", __func__, driver->driver.name); imx_udc_enable(imx_usb); return 0; -fail: - imx_usb->driver = NULL; - imx_usb->gadget.dev.driver = NULL; - return retval; } static int imx_udc_stop(struct usb_gadget *gadget, @@ -1370,8 +1361,6 @@ static int imx_udc_stop(struct usb_gadget *gadget, imx_usb->gadget.dev.driver = NULL; imx_usb->driver = NULL; - device_del(&imx_usb->gadget.dev); - D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n", __func__, driver->driver.name); @@ -1477,6 +1466,10 @@ static int __init imx_udc_probe(struct platform_device *pdev) imx_usb->gadget.dev.parent = &pdev->dev; imx_usb->gadget.dev.dma_mask = pdev->dev.dma_mask; + ret = device_add(&imx_usb->gadget.dev); + if (retval) + goto fail4; + platform_set_drvdata(pdev, imx_usb); usb_init_data(imx_usb); @@ -1488,9 +1481,11 @@ static int __init imx_udc_probe(struct platform_device *pdev) ret = usb_add_gadget_udc(&pdev->dev, &imx_usb->gadget); if (ret) - goto fail4; + goto fail5; return 0; +fail5: + device_unregister(&imx_usb->gadget.dev); fail4: for (i = 0; i < IMX_USB_NB_EP + 1; i++) free_irq(imx_usb->usbd_int[i], imx_usb); @@ -1514,6 +1509,7 @@ static int __exit imx_udc_remove(struct platform_device *pdev) int i; usb_del_gadget_udc(&imx_usb->gadget); + device_unregister(&imx_usb->gadget.dev); imx_udc_disable(imx_usb); del_timer(&imx_usb->timer); diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c index d226058e3b88..32524b631959 100644 --- a/drivers/usb/gadget/net2272.c +++ b/drivers/usb/gadget/net2272.c @@ -59,7 +59,7 @@ static const char * const ep_name[] = { }; #define DMA_ADDR_INVALID (~(dma_addr_t)0) -#ifdef CONFIG_USB_GADGET_NET2272_DMA +#ifdef CONFIG_USB_NET2272_DMA /* * use_dma: the NET2272 can use an external DMA controller. 
* Note that since there is no generic DMA api, some functions, @@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) for (i = 0; i < 4; ++i) net2272_dequeue_all(&dev->ep[i]); + /* report disconnect; the driver is already quiesced */ + if (driver) { + spin_unlock(&dev->lock); + driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + net2272_usb_reinit(dev); } diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index a1b650e11339..3bd0f992fb49 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget, err_func: device_remove_file (&dev->pdev->dev, &dev_attr_function); err_unbind: - driver->unbind (&dev->gadget); dev->gadget.dev.driver = NULL; dev->driver = NULL; return retval; @@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) for (i = 0; i < 7; i++) nuke (&dev->ep [i]); + /* report disconnect; the driver is already quiesced */ + if (driver) { + spin_unlock(&dev->lock); + driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + usb_reinit (dev); } diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 06be85c2b233..f8445653577f 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c @@ -62,6 +62,7 @@ #define DRIVER_VERSION "4 October 2004" #define OMAP_DMA_USB_W2FC_TX0 29 +#define OMAP_DMA_USB_W2FC_RX0 26 /* * The OMAP UDC needs _very_ early endpoint setup: before enabling the @@ -1310,7 +1311,7 @@ static int omap_pullup(struct usb_gadget *gadget, int is_on) } static int omap_udc_start(struct usb_gadget *g, - struct usb_gadget_driver *driver) + struct usb_gadget_driver *driver); static int omap_udc_stop(struct usb_gadget *g, struct usb_gadget_driver *driver); diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index 2bbcdce942dc..d0f37484b6b0 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c @@ -1266,13 +1266,6 @@ static int pxa25x_udc_start(struct usb_gadget *g, dev->gadget.dev.driver = &driver->driver; dev->pullup = 1; - retval = device_add (&dev->gadget.dev); - if (retval) { - dev->driver = NULL; - dev->gadget.dev.driver = NULL; - return retval; - } - /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. 
*/ @@ -1310,6 +1303,10 @@ stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver) } del_timer_sync(&dev->timer); + /* report disconnect; the driver is already quiesced */ + if (driver) + driver->disconnect(&dev->gadget); + /* re-init driver-visible data structures */ udc_reinit(dev); } @@ -1331,7 +1328,6 @@ static int pxa25x_udc_stop(struct usb_gadget*g, dev->gadget.dev.driver = NULL; dev->driver = NULL; - device_del (&dev->gadget.dev); dump_state(dev); return 0; @@ -2146,6 +2142,13 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev) dev->gadget.dev.parent = &pdev->dev; dev->gadget.dev.dma_mask = pdev->dev.dma_mask; + retval = device_add(&dev->gadget.dev); + if (retval) { + dev->driver = NULL; + dev->gadget.dev.driver = NULL; + goto err_device_add; + } + the_controller = dev; platform_set_drvdata(pdev, dev); @@ -2196,6 +2199,8 @@ lubbock_fail0: free_irq(irq, dev); #endif err_irq1: + device_unregister(&dev->gadget.dev); + err_device_add: if (gpio_is_valid(dev->mach->gpio_pullup)) gpio_free(dev->mach->gpio_pullup); err_gpio_pullup: @@ -2217,10 +2222,11 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) { struct pxa25x_udc *dev = platform_get_drvdata(pdev); - usb_del_gadget_udc(&dev->gadget); if (dev->driver) return -EBUSY; + usb_del_gadget_udc(&dev->gadget); + device_unregister(&dev->gadget.dev); dev->pullup = 0; pullup(dev); diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index f7d25795821a..2fc867652ef5 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c @@ -1814,11 +1814,6 @@ static int pxa27x_udc_start(struct usb_gadget *g, udc->gadget.dev.driver = &driver->driver; dplus_pullup(udc, 1); - retval = device_add(&udc->gadget.dev); - if (retval) { - dev_err(udc->dev, "device_add error %d\n", retval); - goto fail; - } if (!IS_ERR_OR_NULL(udc->transceiver)) { retval = otg_set_peripheral(udc->transceiver->otg, &udc->gadget); @@ -1876,7 +1871,6 @@ static int pxa27x_udc_stop(struct usb_gadget *g, udc->driver = NULL; - device_del(&udc->gadget.dev); if (!IS_ERR_OR_NULL(udc->transceiver)) return otg_set_peripheral(udc->transceiver->otg, NULL); @@ -2480,13 +2474,24 @@ static int __init pxa_udc_probe(struct platform_device *pdev) driver_name, udc->irq, retval); goto err_irq; } + + retval = device_add(&udc->gadget.dev); + if (retval) { + dev_err(udc->dev, "device_add error %d\n", retval); + goto err_dev_add; + } + retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget); if (retval) goto err_add_udc; pxa_init_debugfs(udc); + return 0; + err_add_udc: + device_unregister(&udc->gadget.dev); +err_dev_add: free_irq(udc->irq, udc); err_irq: iounmap(udc->regs); @@ -2507,6 +2512,7 @@ static int __exit pxa_udc_remove(struct platform_device *_dev) int gpio = udc->mach->gpio_pullup; usb_del_gadget_udc(&udc->gadget); + device_del(&udc->gadget.dev); usb_gadget_unregister_driver(udc->driver); free_irq(udc->irq, udc); pxa_cleanup_debugfs(udc); diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index fc07b4381286..08f89652533b 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c @@ -1668,8 +1668,7 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev) static int s3c2410_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { - struct s3c2410_udc *udc = to_s3c2410(g) - int retval; + struct s3c2410_udc *udc = to_s3c2410(g); dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name); @@ -1677,22 +1676,10 @@ static int 
s3c2410_udc_start(struct usb_gadget *g, udc->driver = driver; udc->gadget.dev.driver = &driver->driver; - /* Bind the driver */ - retval = device_add(&udc->gadget.dev); - if (retval) { - dev_err(&udc->gadget.dev, "Error in device_add() : %d\n", retval); - goto register_error; - } - /* Enable udc */ s3c2410_udc_enable(udc); return 0; - -register_error: - udc->driver = NULL; - udc->gadget.dev.driver = NULL; - return retval; } static int s3c2410_udc_stop(struct usb_gadget *g, @@ -1700,7 +1687,6 @@ static int s3c2410_udc_stop(struct usb_gadget *g, { struct s3c2410_udc *udc = to_s3c2410(g); - device_del(&udc->gadget.dev); udc->driver = NULL; /* Disable udc */ @@ -1842,6 +1828,13 @@ static int s3c2410_udc_probe(struct platform_device *pdev) udc->gadget.dev.parent = &pdev->dev; udc->gadget.dev.dma_mask = pdev->dev.dma_mask; + /* Bind the driver */ + retval = device_add(&udc->gadget.dev); + if (retval) { + dev_err(&udc->gadget.dev, "Error in device_add() : %d\n", retval); + goto err_device_add; + } + the_controller = udc; platform_set_drvdata(pdev, udc); @@ -1930,6 +1923,8 @@ err_gpio_claim: err_int: free_irq(IRQ_USBD, udc); err_map: + device_unregister(&udc->gadget.dev); +err_device_add: iounmap(base_addr); err_mem: release_mem_region(rsrc_start, rsrc_len); @@ -1947,10 +1942,11 @@ static int s3c2410_udc_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s()\n", __func__); - usb_del_gadget_udc(&udc->gadget); if (udc->driver) return -EBUSY; + usb_del_gadget_udc(&udc->gadget); + device_unregister(&udc->gadget.dev); debugfs_remove(udc->regs_info); if (udc_info && !udc_info->udc_command && diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index c5034d9c946b..b369292d4b90 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c @@ -136,7 +136,7 @@ static struct portmaster { pr_debug(fmt, ##arg) #endif /* pr_vdebug */ #else -#ifndef pr_vdebig +#ifndef pr_vdebug #define pr_vdebug(fmt, arg...) 
\ ({ if (0) pr_debug(fmt, ##arg); }) #endif /* pr_vdebug */ diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c index e0c5e88e03ed..c7d460f43390 100644 --- a/drivers/usb/gadget/u_uac1.c +++ b/drivers/usb/gadget/u_uac1.c @@ -240,8 +240,11 @@ static int gaudio_open_snd_dev(struct gaudio *card) snd = &card->playback; snd->filp = filp_open(fn_play, O_WRONLY, 0); if (IS_ERR(snd->filp)) { + int ret = PTR_ERR(snd->filp); + ERROR(card, "No such PCM playback device: %s\n", fn_play); snd->filp = NULL; + return ret; } pcm_file = snd->filp->private_data; snd->substream = pcm_file->substream; diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 2a9cd369f71c..f8f62c3ed65e 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c @@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_disconnect(udc->gadget); udc->driver->disconnect(udc->gadget); udc->driver->unbind(udc->gadget); - usb_gadget_udc_stop(udc->gadget, udc->driver); + usb_gadget_udc_stop(udc->gadget, NULL); udc->driver = NULL; udc->dev.driver = NULL; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b416a3fc9959..416a6dce5e11 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci) static void end_unlink_async(struct ehci_hcd *ehci); static void unlink_empty_async(struct ehci_hcd *ehci); +static void unlink_empty_async_suspended(struct ehci_hcd *ehci); static void ehci_work(struct ehci_hcd *ehci); static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); @@ -748,11 +749,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* guard against (alleged) silicon errata */ if (cmd & CMD_IAAD) ehci_dbg(ehci, "IAA with IAAD still set?\n"); - if (ehci->async_iaa) { + if (ehci->async_iaa) COUNT(ehci->stats.iaa); - end_unlink_async(ehci); - } else - ehci_dbg(ehci, "IAA with nothing unlinked?\n"); + end_unlink_async(ehci); } /* remote wakeup [4.3.1] */ diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4d3b294f203e..7d06e77f6c4f 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) ehci->rh_state = EHCI_RH_SUSPENDED; end_unlink_async(ehci); - unlink_empty_async(ehci); + unlink_empty_async_suspended(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index fd252f0cfb3a..23d136904285 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -135,7 +135,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) * qtd is updated in qh_completions(). Update the QH * overlay here. */ - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { + if (qh->hw->hw_token & ACTIVE_BIT(ehci)) { qh->hw->hw_qtd_next = qtd->hw_next; qtd = NULL; } @@ -449,11 +449,19 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) else if (last_status == -EINPROGRESS && !urb->unlinked) continue; - /* qh unlinked; token in overlay may be most current */ - if (state == QH_STATE_IDLE - && cpu_to_hc32(ehci, qtd->qtd_dma) - == hw->hw_current) { + /* + * If this was the active qtd when the qh was unlinked + * and the overlay's token is active, then the overlay + * hasn't been written back to the qtd yet so use its + * token instead of the qtd's. 
After the qtd is + * processed and removed, the overlay won't be valid + * any more. + */ + if (state == QH_STATE_IDLE && + qh->qtd_list.next == &qtd->qtd_list && + (hw->hw_token & ACTIVE_BIT(ehci))) { token = hc32_to_cpu(ehci, hw->hw_token); + hw->hw_token &= ~ACTIVE_BIT(ehci); /* An unlink may leave an incomplete * async transaction in the TT buffer. @@ -1170,7 +1178,7 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) struct ehci_qh *prev; /* Add to the end of the list of QHs waiting for the next IAAD */ - qh->qh_state = QH_STATE_UNLINK; + qh->qh_state = QH_STATE_UNLINK_WAIT; if (ehci->async_unlink) ehci->async_unlink_last->unlink_next = qh; else @@ -1213,9 +1221,19 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested) /* Do only the first waiting QH (nVidia bug?) */ qh = ehci->async_unlink; - ehci->async_iaa = qh; - ehci->async_unlink = qh->unlink_next; - qh->unlink_next = NULL; + + /* + * Intel (?) bug: The HC can write back the overlay region + * even after the IAA interrupt occurs. In self-defense, + * always go through two IAA cycles for each QH. + */ + if (qh->qh_state == QH_STATE_UNLINK_WAIT) { + qh->qh_state = QH_STATE_UNLINK; + } else { + ehci->async_iaa = qh; + ehci->async_unlink = qh->unlink_next; + qh->unlink_next = NULL; + } /* Make sure the unlinks are all visible to the hardware */ wmb(); @@ -1298,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci) } } +/* The root hub is suspended; unlink all the async QHs */ +static void unlink_empty_async_suspended(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + while (ehci->async->qh_next.qh) { + qh = ehci->async->qh_next.qh; + WARN_ON(!list_empty(&qh->qtd_list)); + single_unlink_async(ehci, qh); + } + start_iaa_cycle(ehci, false); +} + /* makes sure the async qh will become idle */ /* caller must own ehci->lock */ diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index b476daf49f6f..010f686d8881 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1214,6 +1214,7 @@ itd_urb_transaction ( memset (itd, 0, sizeof *itd); itd->itd_dma = itd_dma; + itd->frame = 9999; /* an invalid value */ list_add (&itd->itd_list, &sched->td_list); } spin_unlock_irqrestore (&ehci->lock, flags); @@ -1915,6 +1916,7 @@ sitd_urb_transaction ( memset (sitd, 0, sizeof *sitd); sitd->sitd_dma = sitd_dma; + sitd->frame = 9999; /* an invalid value */ list_add (&sitd->sitd_list, &iso_sched->td_list); } diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0f..c3fa1305f830 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci) * (a) SMP races against real IAA firing and retriggering, and * (b) clean HC shutdown, when IAA watchdog was pending. */ - if (ehci->async_iaa) { + if (1) { u32 cmd, status; /* If we get here, IAA is *REALLY* late. It's barely diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 35616ffbe3ae..6dc238c592bc 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, * is attached to (or the roothub port its ancestor hub is attached to). All we * know is the index of that port under either the USB 2.0 or the USB 3.0 * roothub, but that doesn't give us the real index into the HW port status - * registers. 
Scan through the xHCI roothub port array, looking for the Nth - * entry of the correct port speed. Return the port number of that entry. + * registers. Call xhci_find_raw_port_number() to get real index. */ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, struct usb_device *udev) { struct usb_device *top_dev; - unsigned int num_similar_speed_ports; - unsigned int faked_port_num; - int i; + struct usb_hcd *hcd; + + if (udev->speed == USB_SPEED_SUPER) + hcd = xhci->shared_hcd; + else + hcd = xhci->main_hcd; for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) /* Found device below root hub */; - faked_port_num = top_dev->portnum; - for (i = 0, num_similar_speed_ports = 0; - i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { - u8 port_speed = xhci->port_array[i]; - - /* - * Skip ports that don't have known speeds, or have duplicate - * Extended Capabilities port speed entries. - */ - if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) - continue; - /* - * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and - * 1.1 ports are under the USB 2.0 hub. If the port speed - * matches the device speed, it's a similar speed port. - */ - if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER)) - num_similar_speed_ports++; - if (num_similar_speed_ports == faked_port_num) - /* Roothub ports are numbered from 1 to N */ - return i+1; - } - return 0; + return xhci_find_raw_port_number(hcd, top_dev->portnum); } /* Setup an xHCI virtual device for a Set Address command */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index af259e0ec172..1a30c380043c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = { .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, + .find_raw_port_number = xhci_find_raw_port_number, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 882875465301..1969c001b3f9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci, max_ports = HCS_MAX_PORTS(xhci->hcs_params1); if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Invalid port id %d\n", port_id); - bogus_port_status = true; - goto cleanup; + inc_deq(xhci, xhci->event_ring); + return; } /* Figure out which usb_hcd this port is attached to: * is it a USB 3.0 port or a USB 2.0/1.1 port? */ major_revision = xhci->port_array[port_id - 1]; + + /* Find the right roothub. */ + hcd = xhci_to_hcd(xhci); + if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) + hcd = xhci->shared_hcd; + if (major_revision == 0) { xhci_warn(xhci, "Event for port %u not in " "Extended Capabilities, ignoring.\n", @@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci, * into the index into the ports on the correct split roothub, and the * correct bus_state structure. */ - /* Find the right roothub. 
*/ - hcd = xhci_to_hcd(xhci); - if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) - hcd = xhci->shared_hcd; bus_state = &xhci->bus_state[hcd_index(hcd)]; if (hcd->speed == HCD_USB3) port_array = xhci->usb3_ports; @@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) td->urb->actual_length = - td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); else td->urb->actual_length = 0; @@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, /* Maybe the event was for the data stage? */ td->urb->actual_length = td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); xhci_dbg(xhci, "Waiting for status " "stage event\n"); return 0; @@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: - if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { frame->status = 0; break; } @@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); } len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); if (trb_comp_code != COMP_STOP_INVAL) { frame->actual_length = len; @@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, case COMP_SUCCESS: /* Double check that the HW transferred everything. */ if (event_trb != td->last_trb || - TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { xhci_warn(xhci, "WARN Successful completion " "on short TX\n"); if (td->urb->transfer_flags & URB_SHORT_NOT_OK) @@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, "%d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, td->urb->transfer_buffer_length, - TRB_LEN(le32_to_cpu(event->transfer_len))); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); /* Fast path - was this the last TRB in the TD for this URB? 
*/ if (event_trb == td->last_trb) { - if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); if (td->urb->transfer_buffer_length < td->urb->actual_length) { xhci_warn(xhci, "HC gave bad length " "of %d bytes left\n", - TRB_LEN(le32_to_cpu(event->transfer_len))); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); td->urb->actual_length = 0; if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; @@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code != COMP_STOP_INVAL) td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); } return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) trb_comp_code = COMP_SHORT_TX; @@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci, * TD list. */ if (list_empty(&ep_ring->td_list)) { - xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " - "with no TDs queued?\n", - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), - ep_index); - xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", - (le32_to_cpu(event->flags) & - TRB_TYPE_BITMASK)>>10); - xhci_print_trb_offsets(xhci, (union xhci_trb *) event); + /* + * A stopped endpoint may generate an extra completion + * event if the device was suspended. Don't print + * warnings. + */ + if (!(trb_comp_code == COMP_STOP || + trb_comp_code == COMP_STOP_INVAL)) { + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), + ep_index); + xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", + (le32_to_cpu(event->flags) & + TRB_TYPE_BITMASK)>>10); + xhci_print_trb_offsets(xhci, (union xhci_trb *) event); + } if (ep->skip) { ep->skip = false; xhci_dbg(xhci, "td_list is empty while skip " diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f1f01a834ba7..53b8f89a0b1c 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) * generate interrupts. Don't even try to enable MSI. */ if (xhci->quirks & XHCI_BROKEN_MSI) - return 0; + goto legacy_irq; /* unregister the legacy interrupt */ if (hcd->irq) @@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) return -EINVAL; } + legacy_irq: /* fall back to legacy interrupt*/ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd); @@ -3778,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) return 0; } +/* + * Transfer the port index into real index in the HW port status + * registers. Calculate the offset between the port's PORTSC register + * and the port status base. Divide by the number of per-port registers + * to get the real index. Raw port numbers are 1-based.
+ */ +int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; + __le32 __iomem *addr; + int raw_port; + + if (hcd->speed != HCD_USB3) + addr = xhci->usb2_ports[port1 - 1]; + else + addr = xhci->usb3_ports[port1 - 1]; + + raw_port = (addr - base_addr)/NUM_PORT_REGS + 1; + return raw_port; +} + #ifdef CONFIG_USB_SUSPEND /* BESL to HIRD Encoding array for USB2 LPM */ diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f791bd0aee6c..63582719e0fb 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -206,8 +206,8 @@ struct xhci_op_regs { /* bits 12:31 are reserved (and should be preserved on writes). */ /* IMAN - Interrupt Management Register */ -#define IMAN_IP (1 << 1) -#define IMAN_IE (1 << 0) +#define IMAN_IE (1 << 1) +#define IMAN_IP (1 << 0) /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. */ @@ -972,6 +972,10 @@ struct xhci_transfer_event { __le32 flags; }; +/* Transfer event TRB length bit mask */ +/* bits 0:23 */ +#define EVENT_TRB_LEN(p) ((p) & 0xffffff) + /** Transfer Event bit fields **/ #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) @@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array, int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); +int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); #ifdef CONFIG_PM int xhci_bus_suspend(struct usb_hcd *hcd); diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 45b19e2c60ba..05e51432dd2f 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -7,11 +7,6 @@ config USB_MUSB_HDRC tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' depends on USB && USB_GADGET - select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) - select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX) - select TWL4030_USB if MACH_OMAP_3430SDP - select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA - select OMAP_CONTROL_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS help Say Y here if your system has a dual role high speed USB diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 7c71769d71ff..41613a2b35e8 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; - err = musb->int_usb & USB_INTR_VBUSERROR; + err = musb->int_usb & MUSB_INTR_VBUSERROR; if (err) { /* * The Mentor core doesn't debounce VBUS as needed diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 60b41cc28da4..daec6e0f7e38 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1624,8 +1624,6 @@ EXPORT_SYMBOL_GPL(musb_dma_completion); /*-------------------------------------------------------------------------*/ -#ifdef CONFIG_SYSFS - static ssize_t musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1742,8 +1740,6 @@ static const struct attribute_group musb_attr_group = { .attrs = musb_attributes, }; -#endif /* sysfs */ - /* Only used to provide driver mode change events */ static void musb_irq_work(struct work_struct *data) { @@ -1968,11 +1964,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if 
(status < 0) goto fail4; -#ifdef CONFIG_SYSFS status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); if (status) goto fail5; -#endif pm_runtime_put(musb->controller); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index be18537c5f14..83eddedcd9be 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request, static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { - if (!is_buffer_mapped(request)) + struct musb_ep *musb_ep = request->ep; + + if (!is_buffer_mapped(request) || !musb_ep->dma) return; if (request->request.dma == DMA_ADDR_INVALID) { @@ -195,7 +197,10 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - unmap_dma_buffer(req, musb); + + if (!dma_mapping_error(&musb->g.dev, request->dma)) + unmap_dma_buffer(req, musb); + if (request->status == 0) dev_dbg(musb->controller, "%s done request %p, %d/%d\n", ep->end_point.name, request, diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 1762354fe793..1a42a458f2c4 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -51,7 +51,7 @@ struct omap2430_glue { }; #define glue_to_musb(g) platform_get_drvdata(g->musb) -struct omap2430_glue *_glue; +static struct omap2430_glue *_glue; static struct timer_list musb_idle_timer; @@ -237,9 +237,13 @@ void omap_musb_mailbox(enum omap_musb_vbus_id_status status) { struct omap2430_glue *glue = _glue; - if (glue && glue_to_musb(glue)) { - glue->status = status; - } else { + if (!glue) { + pr_err("%s: musb core is not yet initialized\n", __func__); + return; + } + glue->status = status; + + if (!glue_to_musb(glue)) { pr_err("%s: musb core is not yet ready\n", __func__); return; } diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c index e1814397ca3a..2bd03d261a50 100644 --- a/drivers/usb/otg/otg.c +++ b/drivers/usb/otg/otg.c @@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type) spin_lock_irqsave(&phy_lock, flags); phy = __usb_find_phy(&phy_list, type); - if (IS_ERR(phy)) { + if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { pr_err("unable to find transceiver of type %s\n", usb_phy_type_string(type)); goto err0; @@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index) spin_lock_irqsave(&phy_lock, flags); phy = __usb_find_phy_dev(dev, &phy_bind_list, index); - if (IS_ERR(phy)) { + if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { pr_err("unable to find transceiver\n"); goto err0; } @@ -301,8 +301,12 @@ EXPORT_SYMBOL(devm_usb_put_phy); */ void usb_put_phy(struct usb_phy *x) { - if (x) + if (x) { + struct module *owner = x->dev->driver->owner; + put_device(x->dev); + module_put(owner); + } } EXPORT_SYMBOL(usb_put_phy); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 65217a590068..90549382eba5 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -38,6 +38,7 @@ config USB_ISP1301 tristate "NXP ISP1301 USB transceiver support" depends on USB || USB_GADGET depends on I2C + select USB_OTG_UTILS help Say Y here to add support for the NXP ISP1301 USB transceiver driver. 
This chip is typically used as USB transceiver for USB host, gadget diff --git a/drivers/usb/phy/omap-control-usb.c b/drivers/usb/phy/omap-control-usb.c index 5323b71c3521..1419ceda9759 100644 --- a/drivers/usb/phy/omap-control-usb.c +++ b/drivers/usb/phy/omap-control-usb.c @@ -219,32 +219,26 @@ static int omap_control_usb_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control_dev_conf"); - control_usb->dev_conf = devm_request_and_ioremap(&pdev->dev, res); - if (!control_usb->dev_conf) { - dev_err(&pdev->dev, "Failed to obtain io memory\n"); - return -EADDRNOTAVAIL; - } + control_usb->dev_conf = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(control_usb->dev_conf)) + return PTR_ERR(control_usb->dev_conf); if (control_usb->type == OMAP_CTRL_DEV_TYPE1) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otghs_control"); - control_usb->otghs_control = devm_request_and_ioremap( + control_usb->otghs_control = devm_ioremap_resource( &pdev->dev, res); - if (!control_usb->otghs_control) { - dev_err(&pdev->dev, "Failed to obtain io memory\n"); - return -EADDRNOTAVAIL; - } + if (IS_ERR(control_usb->otghs_control)) + return PTR_ERR(control_usb->otghs_control); } if (control_usb->type == OMAP_CTRL_DEV_TYPE2) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_power_usb"); - control_usb->phy_power = devm_request_and_ioremap( + control_usb->phy_power = devm_ioremap_resource( &pdev->dev, res); - if (!control_usb->phy_power) { - dev_dbg(&pdev->dev, "Failed to obtain io memory\n"); - return -EADDRNOTAVAIL; - } + if (IS_ERR(control_usb->phy_power)) + return PTR_ERR(control_usb->phy_power); control_usb->sys_clk = devm_clk_get(control_usb->dev, "sys_clkin"); diff --git a/drivers/usb/phy/omap-usb3.c b/drivers/usb/phy/omap-usb3.c index fadc0c2b65bb..a6e60b1e102e 100644 --- a/drivers/usb/phy/omap-usb3.c +++ b/drivers/usb/phy/omap-usb3.c @@ -212,11 +212,9 @@ static int omap_usb3_probe(struct platform_device *pdev) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl"); - phy->pll_ctrl_base = devm_request_and_ioremap(&pdev->dev, res); - if (!phy->pll_ctrl_base) { - dev_err(&pdev->dev, "ioremap of pll_ctrl failed\n"); - return -ENOMEM; - } + phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(phy->pll_ctrl_base)) + return PTR_ERR(phy->pll_ctrl_base); phy->dev = &pdev->dev; diff --git a/drivers/usb/phy/samsung-usbphy.c b/drivers/usb/phy/samsung-usbphy.c index 6ea553733832..967101ec15fd 100644 --- a/drivers/usb/phy/samsung-usbphy.c +++ b/drivers/usb/phy/samsung-usbphy.c @@ -787,11 +787,9 @@ static int samsung_usbphy_probe(struct platform_device *pdev) return -ENODEV; } - phy_base = devm_request_and_ioremap(dev, phy_mem); - if (!phy_base) { - dev_err(dev, "%s: register mapping failed\n", __func__); - return -ENXIO; - } + phy_base = devm_ioremap_resource(dev, phy_mem); + if (IS_ERR(phy_base)) + return PTR_ERR(phy_base); sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL); if (!sphy) diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index cbd904b8fba5..4775f8209e55 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial) } struct ark3116_private { - wait_queue_head_t delta_msr_wait; struct async_icount icount; int irda; /* 1 for irda device */ @@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port) if (!priv) return -ENOMEM; - init_waitqueue_head(&priv->delta_msr_wait); 
mutex_init(&priv->hw_lock); spin_lock_init(&priv->status_lock); @@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty, case TIOCMIWAIT: for (;;) { struct async_icount prev = priv->icount; - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + if ((prev.rng == priv->icount.rng) && (prev.dsr == priv->icount.dsr) && (prev.dcd == priv->icount.dcd) && @@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index d255f66e708e..07d4650a32ab 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table); struct ch341_private { spinlock_t lock; /* access lock */ - wait_queue_head_t delta_msr_wait; /* wait queue for modem status */ unsigned baud_rate; /* set baud rate */ u8 line_control; /* set line control value RTS/DTR */ u8 line_status; /* active status of modem control inputs */ @@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->baud_rate = DEFAULT_BAUD_RATE; priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; @@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on) priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); spin_unlock_irqrestore(&priv->lock, flags); ch341_set_handshake(port->serial->dev, priv->line_control); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } static void ch341_close(struct usb_serial_port *port) @@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb) tty_kref_put(tty); } - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } exit: @@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (!multi_change) { - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; multi_change = priv->multi_status_change; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index edc0f0dcad83..4747d1c328ff 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -85,6 +85,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ + { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ @@ -150,6 +151,25 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo 
CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ + { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ + { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */ + { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */ + { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */ + { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */ + { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */ + { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */ + { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */ + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 8efa19d0e9fb..ba7352e4187e 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -111,7 +111,6 @@ struct cypress_private { int baud_rate; /* stores current baud rate in integer form */ int isthrottled; /* if throttled, discard reads */ - wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */ char prev_status, diff_status; /* used for TIOCMIWAIT */ /* we pass a pointer to this as the argument sent to cypress_set_termios old_termios */ @@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) kfree(priv); return -ENOMEM; } - init_waitqueue_head(&priv->delta_msr_wait); usb_reset_configuration(serial->dev); @@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty, switch (cmd) { /* This code comes from drivers/char/serial.c and ftdi_sio.c */ case TIOCMIWAIT: - while (priv != NULL) { - interruptible_sleep_on(&priv->delta_msr_wait); + for (;;) { + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - else { + + if (port->serial->disconnected) + return -EIO; + + { char diff = priv->diff_status; if (diff == 0) return -EIO; /* no change => error */ @@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb) if (priv->current_status != priv->prev_status) { priv->diff_status |= priv->current_status ^ priv->prev_status; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); priv->prev_status = priv->current_status; } spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index b1b2dc64b50b..a172ad5c5ce8 100644 --- 
a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table); struct f81232_private { spinlock_t lock; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb) line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; @@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); @@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index edd162df49ca..9886180e45f1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -69,9 +69,7 @@ struct ftdi_private { int flags; /* some ASYNC_xxxx flags are supported */ unsigned long last_dtr_rts; /* saved modem control outputs */ struct async_icount icount; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ char prev_status; /* Used for TIOCMIWAIT */ - bool dev_gone; /* Used to abort TIOCMIWAIT */ char transmit_empty; /* If transmitter is empty or not */ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface (0 for FT232/245) */ @@ -642,6 +640,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, + { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, @@ -1691,10 +1690,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) kref_init(&priv->kref); mutex_init(&priv->cfg_lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->flags = ASYNC_LOW_LATENCY; - priv->dev_gone = false; if (quirk && quirk->port_probe) quirk->port_probe(priv); @@ -1840,8 +1837,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); - priv->dev_gone = true; - wake_up_interruptible_all(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); remove_sysfs_attrs(port); @@ -1989,7 +1985,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, if (diff_status & FTDI_RS0_RLSD) priv->icount.dcd++; - wake_up_interruptible_all(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); priv->prev_status = status; } @@ -2440,11 +2436,15 @@ static int ftdi_ioctl(struct tty_struct *tty, */ case TIOCMIWAIT: cprev = priv->icount; - while (!priv->dev_gone) { - interruptible_sleep_on(&priv->delta_msr_wait); + for (;;) { + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if 
(port->serial->disconnected) + return -EIO; + cnow = priv->icount; if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || @@ -2454,8 +2454,6 @@ static int ftdi_ioctl(struct tty_struct *tty, } cprev = cnow; } - return -EIO; - break; case TIOCSERGETLSR: return get_lsr_info(port, (struct serial_struct __user *)arg); break; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 9d359e189a64..e79861eeed4c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -584,6 +584,13 @@ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ /* + * Mitsubishi Electric Corp. (http://www.meau.com) + * Submitted by Konstantin Holoborodko + */ +#define MITSUBISHI_VID 0x06D3 +#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */ + +/* * Definitions for B&B Electronics products. */ #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 1a07b12ef341..81caf5623ee2 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port) if (!serial) return; - mutex_lock(&port->serial->disc_mutex); - - if (!port->serial->disconnected) - garmin_clear(garmin_data_p); + garmin_clear(garmin_data_p); /* shutdown our urbs */ usb_kill_urb(port->read_urb); @@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port) /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) garmin_data_p->state = STATE_DISCONNECTED; - - mutex_unlock(&port->serial->disc_mutex); } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index b00e5cbf741f..efd8b978128c 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -110,7 +110,6 @@ struct edgeport_port { wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ - wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) /* initialize our wait queues */ init_waitqueue_head(&edge_port->wait_open); init_waitqueue_head(&edge_port->wait_chase); - init_waitqueue_head(&edge_port->delta_msr_wait); init_waitqueue_head(&edge_port->wait_command); /* initialize our icount structure */ @@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); cprev = edge_port->icount; while (1) { - prepare_to_wait(&edge_port->delta_msr_wait, + prepare_to_wait(&port->delta_msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&edge_port->delta_msr_wait, &wait); + finish_wait(&port->delta_msr_wait, &wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 
newMsr) icount->dcd++; if (newMsr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&edge_port->delta_msr_wait); + wake_up_interruptible(&edge_port->port->delta_msr_wait); } /* Save the new modem status */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index c23776679f70..7777172206de 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -87,9 +87,6 @@ struct edgeport_port { int close_pending; int lsr_event; struct async_icount icount; - wait_queue_head_t delta_msr_wait; /* for handling sleeping while - waiting for msr change to - happen */ struct edgeport_serial *edge_serial; struct usb_serial_port *port; __u8 bUartMode; /* Port type, 0: RS232, etc. */ @@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) icount->dcd++; if (msr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&edge_port->delta_msr_wait); + wake_up_interruptible(&edge_port->port->delta_msr_wait); } /* Save the new modem status */ @@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) dev = port->serial->dev; memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); - init_waitqueue_head(&edge_port->delta_msr_wait); /* turn off loopback */ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); @@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = edge_port->icount; while (1) { - interruptible_sleep_on(&edge_port->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = { .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, + .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index a64d420f687b..06d5a60be2c4 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -114,8 +114,6 @@ struct mct_u232_private { unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ struct async_icount icount; - wait_queue_head_t msr_wait; /* for handling sleeping while waiting - for msr change to happen */ }; #define THROTTLED 0x01 @@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->msr_wait); usb_set_serial_port_data(port, priv); @@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb) tty_kref_put(tty); } #endif - wake_up_interruptible(&priv->msr_wait); + wake_up_interruptible(&port->delta_msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); @@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_struct *tty, cprev = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); for ( ; ; ) { - prepare_to_wait(&mct_u232_port->msr_wait, + prepare_to_wait(&port->delta_msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&mct_u232_port->msr_wait, &wait); + finish_wait(&port->delta_msr_wait, &wait); /* see if a signal did it */ if 
(signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&mct_u232_port->lock, flags); cnow = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 809fb329eca5..b8051fa61911 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -219,7 +219,6 @@ struct moschip_port { char open; char open_ports; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ - wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr) icount->rng++; smp_wmb(); } + + mos7840_port->delta_msr_cond = 1; + wake_up_interruptible(&port->port->delta_msr_wait); } } @@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) /* initialize our wait queues */ init_waitqueue_head(&mos7840_port->wait_chase); - init_waitqueue_head(&mos7840_port->delta_msr_wait); /* initialize our icount structure */ memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); @@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty, mos7840_port->read_urb_busy = false; } } - wake_up(&mos7840_port->delta_msr_wait); - mos7840_port->delta_msr_cond = 1; dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, mos7840_port->shadowLCR); } @@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty, while (1) { /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ mos7840_port->delta_msr_cond = 0; - wait_event_interruptible(mos7840_port->delta_msr_wait, - (mos7840_port-> + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + mos7840_port-> delta_msr_cond == 1)); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = mos7840_port->icount; smp_rmb(); if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f7d339d8187b..558adfc05007 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -341,6 +341,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 +#define CINTERION_PRODUCT_AH6 0x0055 +#define CINTERION_PRODUCT_PLS8 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -579,6 +581,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, @@ -1260,6 +1263,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index a958fd41b5b3..87c71ccfee87 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -188,7 +188,6 @@ struct oti6858_private { u8 setup_done; struct delayed_work delayed_setup_work; - wait_queue_head_t intr_wait; struct usb_serial_port *port; /* USB port with which associated */ }; @@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->intr_wait); priv->port = port; INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); @@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(priv->intr_wait, + wait_event_interruptible(port->delta_msr_wait, + port->serial->disconnected || priv->status.pin_state != prev); if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->status.pin_state & PIN_MASK; spin_unlock_irqrestore(&priv->lock, flags); @@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb) if (!priv->transient) { if (xs->pin_state != priv->status.pin_state) - wake_up_interruptible(&priv->intr_wait); + wake_up_interruptible(&port->delta_msr_wait); memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); } diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 54adc9125e5c..3b10018d89a3 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -139,7 +139,6 @@ struct pl2303_serial_private { struct pl2303_private { spinlock_t lock; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); @@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port, spin_unlock_irqrestore(&priv->lock, flags); if (priv->line_status & UART_BREAK_ERROR) usb_serial_handle_break(port); - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); tty = tty_port_tty_get(&port->port); if (!tty) @@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb) line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&priv->delta_msr_wait); + 
wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 9b1b96f2d095..31f81c3c15eb 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -69,6 +69,7 @@ static struct usb_device_id id_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 24662547dc5b..59b32b782126 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -197,12 +197,15 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) if (is_gobi1k) { /* Gobi 1K USB layout: - * 0: serial port (doesn't respond) + * 0: DM/DIAG (use libqcdm from ModemManager for communication) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net */ - if (ifnum == 2) + if (ifnum == 0) { + dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n"); + altsetting = 1; + } else if (ifnum == 2) dev_dbg(dev, "Modem port found\n"); else altsetting = -1; diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 00e6c9bac8a3..75f125ddb0c9 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -128,7 +128,6 @@ struct qt2_port_private { u8 shadowLSR; u8 shadowMSR; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; struct usb_serial_port *port; @@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(priv->delta_msr_wait, - ((priv->icount.rng != prev.rng) || + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + (priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->lock, flags); @@ -661,7 +664,9 @@ void qt2_process_read_urb(struct urb *urb) __func__); break; } - tty_flip_buffer_push(&port->port); + + if (port_priv->is_open) + tty_flip_buffer_push(&port->port); newport = *(ch + 3); @@ -704,7 +709,8 @@ void qt2_process_read_urb(struct urb *urb) tty_insert_flip_string(&port->port, ch, 1); } - tty_flip_buffer_push(&port->port); + if (port_priv->is_open) + tty_flip_buffer_push(&port->port); } static void qt2_write_bulk_callback(struct urb *urb) @@ -824,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port) spin_lock_init(&port_priv->lock); spin_lock_init(&port_priv->urb_lock); - init_waitqueue_head(&port_priv->delta_msr_wait); port_priv->port = port; port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); @@ -967,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) if (newMSR & UART_MSR_TERI) port_priv->icount.rng++; - wake_up_interruptible(&port_priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } diff --git 
a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 91ff8e3bddbd..549ef68ff5fa 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -149,7 +149,6 @@ enum spcp8x5_type { struct spcp8x5_private { spinlock_t lock; enum spcp8x5_type type; - wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); - init_waitqueue_head(&priv->delta_msr_wait); priv->type = type; usb_set_serial_port_data(port , priv); @@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb) priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); /* wake up the wait for termios */ - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); if (!urb->actual_length) return; @@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port, while (1) { /* wake up in bulk read */ - interruptible_sleep_on(&priv->delta_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index b57cf841c5b6..4b2a19757b4d 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -61,7 +61,6 @@ struct ssu100_port_private { spinlock_t status_lock; u8 shadowLSR; u8 shadowMSR; - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; }; @@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->status_lock, flags); while (1) { - wait_event_interruptible(priv->delta_msr_wait, - ((priv->icount.rng != prev.rng) || + wait_event_interruptible(port->delta_msr_wait, + (port->serial->disconnected || + (priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) if (signal_pending(current)) return -ERESTARTSYS; + if (port->serial->disconnected) + return -EIO; + spin_lock_irqsave(&priv->status_lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->status_lock, flags); @@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->status_lock); - init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&priv->delta_msr_wait); + wake_up_interruptible(&port->delta_msr_wait); } } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 39cb9b807c3c..73deb029fc05 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -74,7 +74,6 @@ struct ti_port { int tp_flags; int tp_closing_wait;/* in .01 secs */ struct async_icount tp_icount; - wait_queue_head_t tp_msr_wait; /* wait for msr change */ wait_queue_head_t tp_write_wait; struct ti_device *tp_tdev; struct usb_serial_port *tp_port; @@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port) 
else tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; tport->tp_closing_wait = closing_wait; - init_waitqueue_head(&tport->tp_msr_wait); init_waitqueue_head(&tport->tp_write_wait); if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { kfree(tport); @@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = tport->tp_icount; while (1) { - interruptible_sleep_on(&tport->tp_msr_wait); + interruptible_sleep_on(&port->delta_msr_wait); if (signal_pending(current)) return -ERESTARTSYS; + + if (port->serial->disconnected) + return -EIO; + cnow = tport->tp_icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr) icount->dcd++; if (msr & TI_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&tport->tp_msr_wait); + wake_up_interruptible(&tport->tp_port->delta_msr_wait); spin_unlock_irqrestore(&tport->tp_lock, flags); } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a19ed74d770d..5d9b178484fd 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref) } } + usb_put_intf(serial->interface); usb_put_dev(serial->dev); kfree(serial); } @@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, } serial->dev = usb_get_dev(dev); serial->type = driver; - serial->interface = interface; + serial->interface = usb_get_intf(interface); kref_init(&serial->kref); mutex_init(&serial->disc_mutex); serial->minor = SERIAL_TTY_NO_MINOR; @@ -902,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface, port->port.ops = &serial_port_ops; port->serial = serial; spin_lock_init(&port->lock); + init_waitqueue_head(&port->delta_msr_wait); /* Keep this for private driver use for the moment but should probably go away */ INIT_WORK(&port->work, usb_serial_port_work); diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 7ab9046ae0ec..105d900150c1 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us) return 0; } -/* This places the HUAWEI usb dongles in multi-port mode */ -static int usb_stor_huawei_feature_init(struct us_data *us) +/* This places the HUAWEI E220 devices in multi-port mode */ +int usb_stor_huawei_e220_init(struct us_data *us) { int result; @@ -104,75 +104,3 @@ static int usb_stor_huawei_feature_init(struct us_data *us) US_DEBUGP("Huawei mode set result is %d\n", result); return 0; } - -/* - * It will send a scsi switch command called rewind' to huawei dongle. - * When the dongle receives this command at the first time, - * it will reboot immediately. After rebooted, it will ignore this command. - * So it is unnecessary to read its response. 
- */ -static int usb_stor_huawei_scsi_init(struct us_data *us) -{ - int result = 0; - int act_len = 0; - struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; - char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - - bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); - bcbw->Tag = 0; - bcbw->DataTransferLength = 0; - bcbw->Flags = bcbw->Lun = 0; - bcbw->Length = sizeof(rewind_cmd); - memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); - memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); - - result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, - US_BULK_CB_WRAP_LEN, &act_len); - US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); - return result; -} - -/* - * It tries to find the supported Huawei USB dongles. - * In Huawei, they assign the following product IDs - * for all of their mobile broadband dongles, - * including the new dongles in the future. - * So if the product ID is not included in this list, - * it means it is not Huawei's mobile broadband dongles. - */ -static int usb_stor_huawei_dongles_pid(struct us_data *us) -{ - struct usb_interface_descriptor *idesc; - int idProduct; - - idesc = &us->pusb_intf->cur_altsetting->desc; - idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct); - /* The first port is CDROM, - * means the dongle in the single port mode, - * and a switch command is required to be sent. */ - if (idesc && idesc->bInterfaceNumber == 0) { - if ((idProduct == 0x1001) - || (idProduct == 0x1003) - || (idProduct == 0x1004) - || (idProduct >= 0x1401 && idProduct <= 0x1500) - || (idProduct >= 0x1505 && idProduct <= 0x1600) - || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { - return 1; - } - } - return 0; -} - -int usb_stor_huawei_init(struct us_data *us) -{ - int result = 0; - - if (usb_stor_huawei_dongles_pid(us)) { - if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446) - result = usb_stor_huawei_scsi_init(us); - else - result = usb_stor_huawei_feature_init(us); - } - return result; -} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 5376d4fc76f0..529327fbb06b 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI usb dongles in multi-port mode */ -int usb_stor_huawei_init(struct us_data *us); +/* This places the HUAWEI E220 devices in multi-port mode */ +int usb_stor_huawei_e220_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 72923b56bbf6..1799335288bd 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -53,6 +53,14 @@ * as opposed to devices that do something strangely or wrongly. */ +/* In-kernel mode switching is deprecated. Do not add new devices to + * this list for the sole purpose of switching them to a different + * mode. Existing userspace solutions are superior. 
+ * + * New mode switching devices should instead be added to the database + * maintained at http://www.draisberghof.de/usb_modeswitch/ + */ + #if !defined(CONFIG_USB_STORAGE_SDDR09) && \ !defined(CONFIG_USB_STORAGE_SDDR09_MODULE) #define NO_SDDR09 @@ -488,6 +496,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), +/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */ +UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, + "Samsung", + "YP-Z3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. @@ -1527,10 +1542,335 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, /* Reported by fangxiaozhi <huananhu@huawei.com> * This brings the HUAWEI data card devices into multi-port mode */ -UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, +UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 
0), +UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 
0), +UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 0), +UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, + "HUAWEI MOBILE", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + 
0), +UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 0), /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 05afe883f038..09d2e3ffd6fc 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data, if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { size_t size; + int max = vfio_pci_get_irq_count(vdev, hdr.index); if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) size = sizeof(uint8_t); @@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data, return -EINVAL; if (hdr.argsz - minsz < hdr.count * size || - hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) + hdr.start >= max || hdr.start + hdr.count > max) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 964ff22bf281..aeb00fc2d3be 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -27,6 +27,7 @@ #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/vfio.h> +#include <linux/slab.h> #include "vfio_pci_private.h" diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 3639371fa697..a96509187deb 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -22,6 +22,7 @@ #include <linux/vfio.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <linux/slab.h> #include "vfio_pci_private.h" diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 959b1cd89e6a..ec6fb3fa59bb 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net) msg.msg_controllen = 0; ubufs = NULL; } else { - struct ubuf_info *ubuf = &vq->ubuf_info[head]; + struct ubuf_info *ubuf; + ubuf = vq->ubuf_info + vq->upend_idx; vq->heads[vq->upend_idx].len = VHOST_DMA_IN_PROGRESS; diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 9951297b2427..957a0b98a5d9 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -60,14 +60,22 @@ enum { VHOST_SCSI_VQ_IO = 2, }; +/* + * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure the bug is in + * kernel but disabling it helps. + * TODO: debug and remove the workaround. 
+ */ +enum { + VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX) +}; + #define VHOST_SCSI_MAX_TARGET 256 #define VHOST_SCSI_MAX_VQ 128 struct vhost_scsi { /* Protected by vhost_scsi->dev.mutex */ - struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; + struct tcm_vhost_tpg **vs_tpg; char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; - bool vs_endpoint; struct vhost_dev dev; struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; @@ -570,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work) } } +static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, int head, unsigned out) +{ + struct virtio_scsi_cmd_resp __user *resp; + struct virtio_scsi_cmd_resp rsp; + int ret; + + memset(&rsp, 0, sizeof(rsp)); + rsp.response = VIRTIO_SCSI_S_BAD_TARGET; + resp = vq->iov[out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, head, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); +} + static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { + struct tcm_vhost_tpg **vs_tpg; struct virtio_scsi_cmd_req v_req; struct tcm_vhost_tpg *tv_tpg; struct tcm_vhost_cmd *tv_cmd; @@ -581,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, int head, ret; u8 target; - /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ - if (unlikely(!vs->vs_endpoint)) + /* + * We can handle the vq only after the endpoint is setup by calling the + * VHOST_SCSI_SET_ENDPOINT ioctl. + * + * TODO: Check that we are running from vhost_worker which acts + * as read-side critical section for vhost kind of RCU. + * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h + */ + vs_tpg = rcu_dereference_check(vq->private_data, 1); + if (!vs_tpg) return; mutex_lock(&vq->mutex); @@ -652,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, /* Extract the tpgt */ target = v_req.lun[1]; - tv_tpg = vs->vs_tpg[target]; + tv_tpg = ACCESS_ONCE(vs_tpg[target]); /* Target does not exist, fail the request */ if (unlikely(!tv_tpg)) { - struct virtio_scsi_cmd_resp __user *resp; - struct virtio_scsi_cmd_resp rsp; - - memset(&rsp, 0, sizeof(rsp)); - rsp.response = VIRTIO_SCSI_S_BAD_TARGET; - resp = vq->iov[out].iov_base; - ret = __copy_to_user(resp, &rsp, sizeof(rsp)); - if (!ret) - vhost_add_used_and_signal(&vs->dev, - vq, head, 0); - else - pr_err("Faulted on virtio_scsi_cmd_resp\n"); - + vhost_scsi_send_bad_target(vs, vq, head, out); continue; } @@ -681,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, if (IS_ERR(tv_cmd)) { vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", PTR_ERR(tv_cmd)); - break; + goto err_cmd; } pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" ": %d\n", tv_cmd, exp_data_len, data_direction); tv_cmd->tvc_vhost = vs; tv_cmd->tvc_vq = vq; - - if (unlikely(vq->iov[out].iov_len != - sizeof(struct virtio_scsi_cmd_resp))) { - vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" - " bytes, out: %d, in: %d\n", - vq->iov[out].iov_len, out, in); - break; - } - tv_cmd->tvc_resp = vq->iov[out].iov_base; /* @@ -716,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(tv_cmd->tvc_cdb), TCM_VHOST_MAX_CDB_SIZE); - break; /* TODO */ + goto err_free; } tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; @@ -729,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, data_direction == DMA_TO_DEVICE); if (unlikely(ret)) { vq_err(vq, 
"Failed to map iov to sgl\n"); - break; /* TODO */ + goto err_free; } } @@ -750,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, } mutex_unlock(&vq->mutex); + return; + +err_free: + vhost_scsi_free_cmd(tv_cmd); +err_cmd: + vhost_scsi_send_bad_target(vs, vq, head, out); + mutex_unlock(&vq->mutex); } static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) @@ -771,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work) vhost_scsi_handle_vq(vs, vq); } +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ + vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ + int i; + + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) + vhost_scsi_flush_vq(vs, i); + vhost_work_flush(&vs->dev, &vs->vs_completion_work); +} + /* * Called from vhost_scsi_ioctl() context to walk the list of available * tcm_vhost_tpg with an active struct tcm_vhost_nexus @@ -781,8 +815,10 @@ static int vhost_scsi_set_endpoint( { struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_tpg **vs_tpg; + struct vhost_virtqueue *vq; + int index, ret, i, len; bool match = false; - int index, ret; mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ @@ -794,6 +830,15 @@ static int vhost_scsi_set_endpoint( } } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; + vs_tpg = kzalloc(len, GFP_KERNEL); + if (!vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -ENOMEM; + } + if (vs->vs_tpg) + memcpy(vs_tpg, vs->vs_tpg, len); + mutex_lock(&tcm_vhost_mutex); list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { mutex_lock(&tv_tpg->tv_tpg_mutex); @@ -808,14 +853,15 @@ static int vhost_scsi_set_endpoint( tv_tport = tv_tpg->tport; if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg[tv_tpg->tport_tpgt]) { + if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { mutex_unlock(&tv_tpg->tv_tpg_mutex); mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vs->dev.mutex); + kfree(vs_tpg); return -EEXIST; } tv_tpg->tv_tpg_vhost_count++; - vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; + vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; smp_mb__after_atomic_inc(); match = true; } @@ -826,12 +872,27 @@ static int vhost_scsi_set_endpoint( if (match) { memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, sizeof(vs->vs_vhost_wwpn)); - vs->vs_endpoint = true; + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { + vq = &vs->vqs[i]; + /* Flushing the vhost_work acts as synchronize_rcu */ + mutex_lock(&vq->mutex); + rcu_assign_pointer(vq->private_data, vs_tpg); + vhost_init_used(vq); + mutex_unlock(&vq->mutex); + } ret = 0; } else { ret = -EEXIST; } + /* + * Act as synchronize_rcu to make sure access to + * old vs->vs_tpg is finished. 
+ */ + vhost_scsi_flush(vs); + kfree(vs->vs_tpg); + vs->vs_tpg = vs_tpg; + mutex_unlock(&vs->dev.mutex); return ret; } @@ -842,6 +903,8 @@ static int vhost_scsi_clear_endpoint( { struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tv_tpg; + struct vhost_virtqueue *vq; + bool match = false; int index, ret, i; u8 target; @@ -850,20 +913,26 @@ static int vhost_scsi_clear_endpoint( for (index = 0; index < vs->dev.nvqs; ++index) { if (!vhost_vq_access_ok(&vs->vqs[index])) { ret = -EFAULT; - goto err; + goto err_dev; } } + + if (!vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return 0; + } + for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { target = i; - tv_tpg = vs->vs_tpg[target]; if (!tv_tpg) continue; + mutex_lock(&tv_tpg->tv_tpg_mutex); tv_tport = tv_tpg->tport; if (!tv_tport) { ret = -ENODEV; - goto err; + goto err_tpg; } if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { @@ -872,20 +941,58 @@ static int vhost_scsi_clear_endpoint( tv_tport->tport_name, tv_tpg->tport_tpgt, t->vhost_wwpn, t->vhost_tpgt); ret = -EINVAL; - goto err; + goto err_tpg; } tv_tpg->tv_tpg_vhost_count--; vs->vs_tpg[target] = NULL; - vs->vs_endpoint = false; + match = true; + mutex_unlock(&tv_tpg->tv_tpg_mutex); } + if (match) { + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { + vq = &vs->vqs[i]; + /* Flushing the vhost_work acts as synchronize_rcu */ + mutex_lock(&vq->mutex); + rcu_assign_pointer(vq->private_data, NULL); + mutex_unlock(&vq->mutex); + } + } + /* + * Act as synchronize_rcu to make sure access to + * old vs->vs_tpg is finished. + */ + vhost_scsi_flush(vs); + kfree(vs->vs_tpg); + vs->vs_tpg = NULL; mutex_unlock(&vs->dev.mutex); + return 0; -err: +err_tpg: + mutex_unlock(&tv_tpg->tv_tpg_mutex); +err_dev: mutex_unlock(&vs->dev.mutex); return ret; } +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_SCSI_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + smp_wmb(); + vhost_scsi_flush(vs); + mutex_unlock(&vs->dev.mutex); + return 0; +} + static int vhost_scsi_open(struct inode *inode, struct file *f) { struct vhost_scsi *s; @@ -926,37 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) return 0; } -static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) -{ - vhost_poll_flush(&vs->dev.vqs[index].poll); -} - -static void vhost_scsi_flush(struct vhost_scsi *vs) -{ - int i; - - for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) - vhost_scsi_flush_vq(vs, i); -} - -static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) -{ - if (features & ~VHOST_FEATURES) - return -EOPNOTSUPP; - - mutex_lock(&vs->dev.mutex); - if ((features & (1 << VHOST_F_LOG_ALL)) && - !vhost_log_access_ok(&vs->dev)) { - mutex_unlock(&vs->dev.mutex); - return -EFAULT; - } - vs->dev.acked_features = features; - smp_wmb(); - vhost_scsi_flush(vs); - mutex_unlock(&vs->dev.mutex); - return 0; -} - static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { @@ -987,7 +1063,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, return -EFAULT; return 0; case VHOST_GET_FEATURES: - features = VHOST_FEATURES; + features = VHOST_SCSI_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) return -EFAULT; return 0; diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 4c1546f71d56..09394657926e 100644 --- a/drivers/video/Kconfig 
+++ b/drivers/video/Kconfig @@ -2277,7 +2277,7 @@ config XEN_FBDEV_FRONTEND select FB_SYS_IMAGEBLIT select FB_SYS_FOPS select FB_DEFERRED_IO - select INPUT_XEN_KBDDEV_FRONTEND + select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC select XEN_XENBUS_FRONTEND default y help diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 12cf5f31ee8f..025428e04c33 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, = var->bits_per_pixel; break; case 16: + /* Older SOCs use IBGR:555 rather than BGR:565. */ + if (sinfo->have_intensity_bit) + var->green.length = 5; + else + var->green.length = 6; + if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { - /* RGB:565 mode */ - var->red.offset = 11; + /* RGB:5X5 mode */ + var->red.offset = var->green.length + 5; var->blue.offset = 0; } else { - /* BGR:565 mode */ + /* BGR:5X5 mode */ var->red.offset = 0; - var->blue.offset = 11; + var->blue.offset = var->green.length + 5; } var->green.offset = 5; - var->green.length = 6; var->red.length = var->blue.length = 5; break; case 32: @@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red, case FB_VISUAL_PSEUDOCOLOR: if (regno < 256) { - if (cpu_is_at91sam9261() || cpu_is_at91sam9263() - || cpu_is_at91sam9rl()) { + if (sinfo->have_intensity_bit) { /* old style I+BGR:555 */ val = ((red >> 11) & 0x001f); val |= ((green >> 6) & 0x03e0); @@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) } sinfo->info = info; sinfo->pdev = pdev; + if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || + cpu_is_at91sam9rl()) { + sinfo->have_intensity_bit = true; + } strcpy(info->fix.id, sinfo->pdev->name); info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index 3f2519d30715..e06cd5d90c97 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/clk.h> #include <linux/fb.h> +#include <linux/io.h> #include <linux/platform_data/video-ep93xx.h> diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7c254084b6a0..86291dcd964a 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long off; + unsigned long mmio_pgoff; unsigned long start; u32 len; if (!info) return -ENODEV; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* frame buffer memory */ + /* + * Ugh. This can be either the frame buffer mapping, or + * if pgoff points past it, the mmio mapping. 
+ */ start = info->fix.smem_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); - if (off >= len) { - /* memory mapped io */ - off -= len; - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } + len = info->fix.smem_len; + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (vma->vm_pgoff >= mmio_pgoff) { + vma->vm_pgoff -= mmio_pgoff; start = info->fix.mmio_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); + len = info->fix.mmio_len; } mutex_unlock(&info->mm_lock); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, off); - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - return 0; + fb_pgprotect(file, vma, start); + + return vm_iomap_memory(vma, start, len); } static int diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383c..7f6709991a5c 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c @@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm, fbmode->vmode = 0; if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; - if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) + if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) fbmode->vmode |= FB_VMODE_INTERLACED; diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c index 9ed83419038b..84de2632857a 100644 --- a/drivers/video/mmp/core.c +++ b/drivers/video/mmp/core.c @@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path) kfree(path); mutex_unlock(&disp_lock); - - dev_info(path->dev, "de-register %s\n", path->name); } EXPORT_SYMBOL_GPL(mmp_unregister_path); diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 755556ca5b2d..45169cbaba6e 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -169,6 +169,7 @@ struct mxsfb_info { unsigned dotclk_delay; const struct mxsfb_devdata *devdata; int mapped; + u32 sync; }; #define mxsfb_is_v3(host) (host->devdata->ipversion == 3) @@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info) vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT) vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; - if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT) + if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT) vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; - if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT) + if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT) vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING; writel(vdctrl0, host->base + LCDC_VDCTRL0); @@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&fb_info->modelist); + host->sync = pdata->sync; + ret = mxsfb_init_fbinfo(host); if (ret != 0) goto error_init_fb; diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c index ed4cad87fbcd..4a5f2cd3d3bf 100644 --- a/drivers/video/omap/lcd_ams_delta.c +++ b/drivers/video/omap/lcd_ams_delta.c @@ -27,6 +27,7 @@ #include <linux/lcd.h> #include <linux/gpio.h> +#include <mach/hardware.h> #include <mach/board-ams-delta.h> #include "omapfb.h" diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c index 3aa62da89195..7fbe04bce0ed 100644 --- 
a/drivers/video/omap/lcd_osk.c +++ b/drivers/video/omap/lcd_osk.c @@ -24,7 +24,10 @@ #include <linux/platform_device.h> #include <asm/gpio.h> + +#include <mach/hardware.h> #include <mach/mux.h> + #include "omapfb.h" static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index e31f5b33b501..d40612c31a98 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -32,6 +32,8 @@ #include <linux/omap-dma.h> +#include <mach/hardware.h> + #include "omapfb.h" #include "lcdc.h" diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 6b6643911d29..048c98381ef6 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -63,6 +63,9 @@ struct tpo_td043_device { u32 power_on_resume:1; }; +/* used to pass spi_device from SPI to DSS portion of the driver */ +static struct tpo_td043_device *g_tpo_td043; + static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) { struct spi_message m; @@ -403,7 +406,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) static int tpo_td043_probe(struct omap_dss_device *dssdev) { - struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); + struct tpo_td043_device *tpo_td043 = g_tpo_td043; int nreset_gpio = dssdev->reset_gpio; int ret = 0; @@ -440,6 +443,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev) if (ret) dev_warn(&dssdev->dev, "failed to create sysfs files\n"); + dev_set_drvdata(&dssdev->dev, tpo_td043); + return 0; fail_gpio_req: @@ -505,6 +510,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi) return -ENODEV; } + if (g_tpo_td043 != NULL) + return -EBUSY; + spi->bits_per_word = 16; spi->mode = SPI_MODE_0; @@ -521,7 +529,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi) tpo_td043->spi = spi; tpo_td043->nreset_gpio = dssdev->reset_gpio; dev_set_drvdata(&spi->dev, tpo_td043); - dev_set_drvdata(&dssdev->dev, tpo_td043); + g_tpo_td043 = tpo_td043; omap_dss_register_driver(&tpo_td043_driver); @@ -534,6 +542,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi) omap_dss_unregister_driver(&tpo_td043_driver); kfree(tpo_td043); + g_tpo_td043 = NULL; return 0; } diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index d7d66ef5cb58..7f791aeda4d2 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = { static const enum omap_dss_output_id omap4_dss_supported_outputs[] = { /* OMAP_DSS_CHANNEL_LCD */ - OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | - OMAP_DSS_OUTPUT_DSI1, + OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1, /* OMAP_DSS_CHANNEL_DIGIT */ - OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI | - OMAP_DSS_OUTPUT_DPI, + OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI, /* OMAP_DSS_CHANNEL_LCD2 */ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef812..0264704a52be 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); lcdc_write_chan(ch, LDHAJR, tmp); + 
lcdc_write_chan_mirror(ch, LDHAJR, tmp); } static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db0186488..d4284458377e 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -1973,7 +1973,8 @@ static int uvesafb_init(void) err = -ENOMEM; if (err) { - platform_device_put(uvesafb_device); + if (uvesafb_device) + platform_device_put(uvesafb_device); platform_driver_unregister(&uvesafb_driver); cn_del_callback(&uvesafb_cn_id); return err; diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index d39dfa4cc235..46d97014342e 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c @@ -47,11 +47,13 @@ static u8 w1_gpio_read_bit(void *data) return gpio_get_value(pdata->pin) ? 1 : 0; } +#if defined(CONFIG_OF) static struct of_device_id w1_gpio_dt_ids[] = { { .compatible = "w1-gpio" }, {} }; MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids); +#endif static int w1_gpio_probe_dt(struct platform_device *pdev) { @@ -158,7 +160,7 @@ static int w1_gpio_probe(struct platform_device *pdev) return err; } -static int __exit w1_gpio_remove(struct platform_device *pdev) +static int w1_gpio_remove(struct platform_device *pdev) { struct w1_bus_master *master = platform_get_drvdata(pdev); struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; @@ -210,7 +212,7 @@ static struct platform_driver w1_gpio_driver = { .of_match_table = of_match_ptr(w1_gpio_dt_ids), }, .probe = w1_gpio_probe, - .remove = __exit_p(w1_gpio_remove), + .remove = w1_gpio_remove, .suspend = w1_gpio_suspend, .resume = w1_gpio_resume, }; diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 7994d933f040..7ce277d2bb67 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -924,7 +924,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb tmp64 = (triplet_ret >> 2); rn |= (tmp64 << i); - if (kthread_should_stop()) { + /* ensure we're called from kthread and not by netlink callback */ + if (!dev->priv && kthread_should_stop()) { mutex_unlock(&dev->bus_mutex); dev_dbg(&dev->dev, "Abort w1_search\n"); return; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9fcc70c11cea..e89fc3133972 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG config AT91RM9200_WATCHDOG tristate "AT91RM9200 watchdog" - depends on ARCH_AT91 + depends on ARCH_AT91RM9200 help Watchdog timer embedded into AT91RM9200 chips. This will reboot your system when the timeout is reached. 
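The w1-gpio hunk above wraps the OF match table in #if defined(CONFIG_OF) so that of_match_ptr() can drop the table on non-DT builds without leaving an unused-variable warning, and it stops marking the remove callback with __exit. A minimal sketch of that idiom for a hypothetical platform driver follows; the foo_* names and the "vendor,foo" compatible string are placeholders and are not taken from the patch.

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		return 0;	/* nothing device-specific in this sketch */
	}

	static int foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	#if defined(CONFIG_OF)
	/* Only referenced when the kernel is built with device tree support. */
	static const struct of_device_id foo_dt_ids[] = {
		{ .compatible = "vendor,foo" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, foo_dt_ids);
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name		= "foo",
			/* of_match_ptr() evaluates to NULL without CONFIG_OF. */
			.of_match_table	= of_match_ptr(foo_dt_ids),
		},
		.probe	= foo_probe,
		.remove	= foo_remove,	/* plain function, not __exit_p() */
	};
	module_platform_driver(foo_driver);

	MODULE_LICENSE("GPL");

Keeping the remove callback out of __exit_p() matters because __exit code is discarded when the driver is built in, so an unbind through sysfs would otherwise skip the remove path; the w1-gpio change above addresses exactly that.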
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index e3b8f757d2d3..0e9d8c479c35 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -40,13 +40,12 @@ #include "sp5100_tco.h" /* Module and version information */ -#define TCO_VERSION "0.03" +#define TCO_VERSION "0.05" #define TCO_MODULE_NAME "SP5100 TCO timer" #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION /* internal variables */ static u32 tcobase_phys; -static u32 resbase_phys; static u32 tco_wdt_fired; static void __iomem *tcobase; static unsigned int pm_iobase; @@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ static unsigned long timer_alive; static char tco_expect_close; static struct pci_dev *sp5100_tco_pci; -static struct resource wdt_res = { - .name = "Watchdog Timer", - .flags = IORESOURCE_MEM, -}; /* the watchdog platform device */ static struct platform_device *sp5100_tco_platform_device; @@ -75,12 +70,6 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static unsigned int force_addr; -module_param(force_addr, uint, 0); -MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address." - " ONLY USE THIS PARAMETER IF YOU REALLY KNOW" - " WHAT YOU ARE DOING (default=none)"); - /* * Some TCO specific functions */ @@ -176,39 +165,6 @@ static void tco_timer_enable(void) } } -static void tco_timer_disable(void) -{ - int val; - - if (sp5100_tco_pci->revision >= 0x40) { - /* For SB800 or later */ - /* Enable watchdog decode bit and Disable watchdog timer */ - outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG); - val = inb(SB800_IO_PM_DATA_REG); - val |= SB800_PCI_WATCHDOG_DECODE_EN; - val |= SB800_PM_WATCHDOG_DISABLE; - outb(val, SB800_IO_PM_DATA_REG); - } else { - /* For SP5100 or SB7x0 */ - /* Enable watchdog decode bit */ - pci_read_config_dword(sp5100_tco_pci, - SP5100_PCI_WATCHDOG_MISC_REG, - &val); - - val |= SP5100_PCI_WATCHDOG_DECODE_EN; - - pci_write_config_dword(sp5100_tco_pci, - SP5100_PCI_WATCHDOG_MISC_REG, - val); - - /* Disable Watchdog timer */ - outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG); - val = inb(SP5100_IO_PM_DATA_REG); - val |= SP5100_PM_WATCHDOG_DISABLE; - outb(val, SP5100_IO_PM_DATA_REG); - } -} - /* * /dev/watchdog handling */ @@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void) { struct pci_dev *dev = NULL; const char *dev_name = NULL; - u32 val, tmp_val; + u32 val; u32 index_reg, data_reg, base_addr; /* Match the PCI device */ @@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void) } else pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); - /* - * Lastly re-programming the watchdog timer MMIO address, - * This method is a last resort... - * - * Before re-programming, to ensure that the watchdog timer - * is disabled, disable the watchdog timer. - */ - tco_timer_disable(); - - if (force_addr) { - /* - * Force the use of watchdog timer MMIO address, and aligned to - * 8byte boundary. - */ - force_addr &= ~0x7; - val = force_addr; - - pr_info("Force the use of 0x%04x as MMIO address\n", val); - } else { - /* - * Get empty slot into the resource tree for watchdog timer. 
- */ - if (allocate_resource(&iomem_resource, - &wdt_res, - SP5100_WDT_MEM_MAP_SIZE, - 0xf0000000, - 0xfffffff8, - 0x8, - NULL, - NULL)) { - pr_err("MMIO allocation failed\n"); - goto unreg_region; - } - - val = resbase_phys = wdt_res.start; - pr_debug("Got 0x%04x from resource tree\n", val); - } - - /* Restore to the low three bits */ - outb(base_addr+0, index_reg); - tmp_val = val | (inb(data_reg) & 0x7); - - /* Re-programming the watchdog timer base address */ - outb(base_addr+0, index_reg); - outb((tmp_val >> 0) & 0xff, data_reg); - outb(base_addr+1, index_reg); - outb((tmp_val >> 8) & 0xff, data_reg); - outb(base_addr+2, index_reg); - outb((tmp_val >> 16) & 0xff, data_reg); - outb(base_addr+3, index_reg); - outb((tmp_val >> 24) & 0xff, data_reg); - - if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, - dev_name)) { - pr_err("MMIO address 0x%04x already in use\n", val); - goto unreg_resource; - } + pr_notice("failed to find MMIO address, giving up.\n"); + goto unreg_region; setup_wdt: tcobase_phys = val; @@ -555,9 +456,6 @@ setup_wdt: unreg_mem_region: release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); -unreg_resource: - if (resbase_phys) - release_resource(&wdt_res); unreg_region: release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); exit: @@ -567,7 +465,6 @@ exit: static int sp5100_tco_init(struct platform_device *dev) { int ret; - char addr_str[16]; /* * Check whether or not the hardware watchdog is there. If found, then @@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev) clear_bit(0, &timer_alive); /* Show module parameters */ - if (force_addr == tcobase_phys) - /* The force_addr is vaild */ - sprintf(addr_str, "0x%04x", force_addr); - else - strcpy(addr_str, "none"); - - pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, " - "force_addr=%s)\n", - tcobase, heartbeat, nowayout, addr_str); + pr_info("initialized (0x%p). 
heartbeat=%d sec (nowayout=%d)\n", + tcobase, heartbeat, nowayout); return 0; exit: iounmap(tcobase); release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - if (resbase_phys) - release_resource(&wdt_res); release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); return ret; } @@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void) misc_deregister(&sp5100_tco_miscdev); iounmap(tcobase); release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); - if (resbase_phys) - release_resource(&wdt_res); release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); } diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h index 71594a0c14b7..2b28c00da0df 100644 --- a/drivers/watchdog/sp5100_tco.h +++ b/drivers/watchdog/sp5100_tco.h @@ -57,7 +57,7 @@ #define SB800_PM_WATCHDOG_DISABLE (1 << 2) #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) -#define SB800_ACPI_MMIO_SEL (1 << 2) +#define SB800_ACPI_MMIO_SEL (1 << 1) #define SB800_PM_WDT_MMIO_OFFSET 0xB00 diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5a32232cf7c1..67af155cf602 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -182,7 +182,7 @@ config XEN_PRIVCMD config XEN_STUB bool "Xen stub drivers" - depends on XEN && X86_64 + depends on XEN && X86_64 && BROKEN default n help Allow kernel to install stub drivers, to reserve space for Xen drivers, diff --git a/drivers/xen/events.c b/drivers/xen/events.c index d17aa41a9041..d8cc8127f19c 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -85,8 +85,7 @@ enum xen_irq_type { * event channel - irq->event channel mapping * cpu - cpu this event channel is bound to * index - type-specific information: - * PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM - * guest, or GSI (real passthrough IRQ) of the device. + * PIRQ - physical IRQ, GSI, flags, and owner domain * VIRQ - virq number * IPI - IPI vector * EVTCHN - @@ -105,7 +104,6 @@ struct irq_info { struct { unsigned short pirq; unsigned short gsi; - unsigned char vector; unsigned char flags; uint16_t domid; } pirq; @@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq, unsigned short evtchn, unsigned short pirq, unsigned short gsi, - unsigned short vector, uint16_t domid, unsigned char flags) { @@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq, info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; - info->u.pirq.vector = vector; info->u.pirq.domid = domid; info->u.pirq.flags = flags; } @@ -403,11 +399,23 @@ static void unmask_evtchn(int port) if (unlikely((cpu != cpu_from_evtchn(port)))) do_hypercall = 1; - else + else { + /* + * Need to clear the mask before checking pending to + * avoid a race with an event becoming pending. + * + * EVTCHNOP_unmask will only trigger an upcall if the + * mask bit was set, so if a hypercall is needed + * remask the event. 
+ */ + sync_clear_bit(port, BM(&s->evtchn_mask[0])); evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); - if (unlikely(evtchn_pending && xen_hvm_domain())) - do_hypercall = 1; + if (unlikely(evtchn_pending && xen_hvm_domain())) { + sync_set_bit(port, BM(&s->evtchn_mask[0])); + do_hypercall = 1; + } + } /* Slow path (hypercall) if this is a non-local port or if this is * an hvm domain and an event is pending (hvm domains don't have @@ -418,8 +426,6 @@ static void unmask_evtchn(int port) } else { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); - sync_clear_bit(port, BM(&s->evtchn_mask[0])); - /* * The following is basically the equivalent of * 'hw_resend_irq'. Just like a real IO-APIC we 'lose @@ -509,6 +515,9 @@ static void xen_free_irq(unsigned irq) { struct irq_info *info = irq_get_handler_data(irq); + if (WARN_ON(!info)) + return; + list_del(&info->list); irq_set_handler_data(irq, NULL); @@ -704,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, goto out; } - xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, + xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); pirq_query_unmask(irq); @@ -752,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, - int pirq, int vector, const char *name, - domid_t domid) + int pirq, const char *name, domid_t domid) { int irq, ret; @@ -766,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); - xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); + xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0); ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; @@ -998,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq) int evtchn = evtchn_from_irq(irq); struct irq_info *info = irq_get_handler_data(irq); + if (WARN_ON(!info)) + return; + mutex_lock(&irq_mapping_update_lock); if (info->refcnt > 0) { @@ -1125,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { + struct irq_info *info = irq_get_handler_data(irq); + + if (WARN_ON(!info)) + return; free_irq(irq, dev_id); unbind_from_irq(irq); } @@ -1306,7 +1321,7 @@ static void __xen_evtchn_do_upcall(void) { int start_word_idx, start_bit_idx; int word_idx, bit_idx; - int i; + int i, irq; int cpu = get_cpu(); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); @@ -1314,6 +1329,8 @@ static void __xen_evtchn_do_upcall(void) do { xen_ulong_t pending_words; + xen_ulong_t pending_bits; + struct irq_desc *desc; vcpu_info->evtchn_upcall_pending = 0; @@ -1325,6 +1342,17 @@ static void __xen_evtchn_do_upcall(void) * selector flag. xchg_xen_ulong must contain an * appropriate barrier. 
*/ + if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { + int evtchn = evtchn_from_irq(irq); + word_idx = evtchn / BITS_PER_LONG; + pending_bits = evtchn % BITS_PER_LONG; + if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { + desc = irq_to_desc(irq); + if (desc) + generic_handle_irq_desc(irq, desc); + } + } + pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); start_word_idx = __this_cpu_read(current_word_idx); @@ -1333,7 +1361,6 @@ static void __xen_evtchn_do_upcall(void) word_idx = start_word_idx; for (i = 0; pending_words != 0; i++) { - xen_ulong_t pending_bits; xen_ulong_t words; words = MASK_LSBS(pending_words, word_idx); @@ -1362,8 +1389,7 @@ static void __xen_evtchn_do_upcall(void) do { xen_ulong_t bits; - int port, irq; - struct irq_desc *desc; + int port; bits = MASK_LSBS(pending_bits, bit_idx); @@ -1436,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq) { struct irq_info *info = info_for_irq(irq); + if (WARN_ON(!info)) + return; + /* Make sure the irq is masked, since the new event channel will also be masked. */ disable_irq(irq); @@ -1709,7 +1738,12 @@ void xen_poll_irq(int irq) int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); - struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; + struct physdev_irq_status_query irq_status; + + if (WARN_ON(!info)) + return -ENOENT; + + irq_status.irq = info->u.pirq.pirq; if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c index 0ef7c4d40f86..b04fb64c5a91 100644 --- a/drivers/xen/fallback.c +++ b/drivers/xen/fallback.c @@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg) } EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); -int HYPERVISOR_physdev_op_compat(int cmd, void *arg) +int xen_physdev_op_compat(int cmd, void *arg) { struct physdev_op op; int rc; @@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg) return rc; } +EXPORT_SYMBOL_GPL(xen_physdev_op_compat); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 316df65163cf..8abd7d579037 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -25,6 +25,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> +#include <linux/syscore_ops.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/processor.h> @@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex); /* Which ACPI ID we have processed from 'struct acpi_processor'. */ static unsigned long *acpi_ids_done; /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */ -static unsigned long __initdata *acpi_id_present; +static unsigned long *acpi_id_present; /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */ -static unsigned long __initdata *acpi_id_cst_present; +static unsigned long *acpi_id_cst_present; static int push_cxx_to_hypervisor(struct acpi_processor *_pr) { @@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void) * for_each_[present|online]_cpu macros which are banded to the virtual * CPU amount. 
*/ -static acpi_status __init +static acpi_status read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) { u32 acpi_id; @@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } -static int __init check_acpi_ids(struct acpi_processor *pr_backup) +static int check_acpi_ids(struct acpi_processor *pr_backup) { if (!pr_backup) return -ENODEV; + if (acpi_id_present && acpi_id_cst_present) + /* OK, done this once .. skip to uploading */ + goto upload; + /* All online CPUs have been processed at this stage. Now verify * whether in fact "online CPUs" == physical CPUs. */ @@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup) read_acpi_id, NULL, NULL, NULL); acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); +upload: if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { unsigned int i; for_each_set_bit(i, acpi_id_present, nr_acpi_bits) { @@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup) (void)upload_pm_data(pr_backup); } } - kfree(acpi_id_present); - acpi_id_present = NULL; - kfree(acpi_id_cst_present); - acpi_id_cst_present = NULL; + return 0; } static int __init check_prereq(void) @@ -467,10 +470,47 @@ static void free_acpi_perf_data(void) free_percpu(acpi_perf_data); } -static int __init xen_acpi_processor_init(void) +static int xen_upload_processor_pm_data(void) { struct acpi_processor *pr_backup = NULL; unsigned int i; + int rc = 0; + + pr_info(DRV_NAME "Uploading Xen processor PM info\n"); + + for_each_possible_cpu(i) { + struct acpi_processor *_pr; + _pr = per_cpu(processors, i /* APIC ID */); + if (!_pr) + continue; + + if (!pr_backup) { + pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); + if (pr_backup) + memcpy(pr_backup, _pr, sizeof(struct acpi_processor)); + } + (void)upload_pm_data(_pr); + } + + rc = check_acpi_ids(pr_backup); + kfree(pr_backup); + + return rc; +} + +static void xen_acpi_processor_resume(void) +{ + bitmap_zero(acpi_ids_done, nr_acpi_bits); + xen_upload_processor_pm_data(); +} + +static struct syscore_ops xap_syscore_ops = { + .resume = xen_acpi_processor_resume, +}; + +static int __init xen_acpi_processor_init(void) +{ + unsigned int i; int rc = check_prereq(); if (rc) @@ -500,38 +540,26 @@ static int __init xen_acpi_processor_init(void) (void)acpi_processor_preregister_performance(acpi_perf_data); for_each_possible_cpu(i) { + struct acpi_processor *pr; struct acpi_processor_performance *perf; + pr = per_cpu(processors, i); perf = per_cpu_ptr(acpi_perf_data, i); - rc = acpi_processor_register_performance(perf, i); - if (rc) - goto err_out; - } - rc = acpi_processor_notify_smm(THIS_MODULE); - if (rc) - goto err_unregister; - - for_each_possible_cpu(i) { - struct acpi_processor *_pr; - _pr = per_cpu(processors, i /* APIC ID */); - if (!_pr) + if (!pr) continue; - if (!pr_backup) { - pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); - if (pr_backup) - memcpy(pr_backup, _pr, sizeof(struct acpi_processor)); - } - (void)upload_pm_data(_pr); + pr->performance = perf; + rc = acpi_processor_get_performance_info(pr); + if (rc) + goto err_out; } - rc = check_acpi_ids(pr_backup); - - kfree(pr_backup); - pr_backup = NULL; + rc = xen_upload_processor_pm_data(); if (rc) goto err_unregister; + register_syscore_ops(&xap_syscore_ops); + return 0; err_unregister: for_each_possible_cpu(i) { @@ -549,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void) { int i; + unregister_syscore_ops(&xap_syscore_ops); 
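The xen-acpi-processor changes above factor the upload into xen_upload_processor_pm_data() and re-run it from a syscore .resume callback, so the processor PM information is pushed to the hypervisor again after a suspend/resume cycle. Below is a minimal sketch of that registration pattern only; the foo_* names stand in for the driver's own helpers and are assumptions, not code from the patch.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/syscore_ops.h>

	/* Stand-in for state the platform loses across suspend and the
	 * driver must push again, analogous to the PM upload above. */
	static void foo_push_state(void)
	{
	}

	static void foo_resume(void)
	{
		/* Called on system resume; syscore callbacks run with only
		 * the boot CPU online. */
		foo_push_state();
	}

	static struct syscore_ops foo_syscore_ops = {
		.resume = foo_resume,
	};

	static int __init foo_init(void)
	{
		foo_push_state();
		register_syscore_ops(&foo_syscore_ops);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		unregister_syscore_ops(&foo_syscore_ops);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

Registration goes on a global list, so the init path registers only after the state it will re-push is valid and the exit path unregisters before freeing it; the patch keeps the same ordering.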
kfree(acpi_ids_done); + kfree(acpi_id_present); + kfree(acpi_id_cst_present); for_each_possible_cpu(i) { struct acpi_processor_performance *perf; perf = per_cpu_ptr(acpi_perf_data, i); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9204126f1560..a2278ba7fb27 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -17,6 +17,7 @@ #include <xen/events.h> #include <asm/xen/pci.h> #include <asm/xen/hypervisor.h> +#include <xen/interface/physdev.h> #include "pciback.h" #include "conf_space.h" #include "conf_space_quirks.h" @@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) static void pcistub_device_release(struct kref *kref) { struct pcistub_device *psdev; + struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data; psdev = container_of(kref, struct pcistub_device, kref); - dev_data = pci_get_drvdata(psdev->dev); + dev = psdev->dev; + dev_data = pci_get_drvdata(dev); - dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); + dev_dbg(&dev->dev, "pcistub_device_release\n"); - xen_unregister_device_domain_owner(psdev->dev); + xen_unregister_device_domain_owner(dev); /* Call the reset function which does not take lock as this * is called from "unbind" which takes a device_lock mutex. */ - __pci_reset_function_locked(psdev->dev); - if (pci_load_and_free_saved_state(psdev->dev, - &dev_data->pci_saved_state)) { - dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); - } else - pci_restore_state(psdev->dev); + __pci_reset_function_locked(dev); + if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) + dev_dbg(&dev->dev, "Could not reload PCI state\n"); + else + pci_restore_state(dev); + + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + struct physdev_pci_device ppdev = { + .seg = pci_domain_nr(dev->bus), + .bus = dev->bus->number, + .devfn = dev->devfn + }; + int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, + &ppdev); + + if (err) + dev_warn(&dev->dev, "MSI-X release failed (%d)\n", + err); + } /* Disable the device */ - xen_pcibk_reset_device(psdev->dev); + xen_pcibk_reset_device(dev); kfree(dev_data); - pci_set_drvdata(psdev->dev, NULL); + pci_set_drvdata(dev, NULL); /* Clean-up the device */ - xen_pcibk_config_free_dyn_fields(psdev->dev); - xen_pcibk_config_free_dev(psdev->dev); + xen_pcibk_config_free_dyn_fields(dev); + xen_pcibk_config_free_dev(dev); - psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; - pci_dev_put(psdev->dev); + dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; + pci_dev_put(dev); kfree(psdev); } @@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; + if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { + struct physdev_pci_device ppdev = { + .seg = pci_domain_nr(dev->bus), + .bus = dev->bus->number, + .devfn = dev->devfn + }; + + err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); + if (err) + dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", + err); + } + /* We need the device active to save the state. 
*/ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 37c1f825f513..b98cf0c35725 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -113,7 +113,8 @@ void xen_pcibk_reset_device(struct pci_dev *dev) if (dev->msi_enabled) pci_disable_msi(dev); #endif - pci_disable_device(dev); + if (pci_is_enabled(dev)) + pci_disable_device(dev); pci_write_config_word(dev, PCI_COMMAND, 0); diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c index d85e411cbf89..bbef194c5b01 100644 --- a/drivers/xen/xen-stub.c +++ b/drivers/xen/xen-stub.c @@ -25,7 +25,6 @@ #include <linux/export.h> #include <linux/types.h> #include <linux/acpi.h> -#include <acpi/acpi_drivers.h> #include <xen/acpi.h> #ifdef CONFIG_ACPI @@ -1029,9 +1029,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) spin_unlock(&info->ring_lock); out: - kunmap_atomic(ring); dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, (unsigned long)ring->head, (unsigned long)ring->tail); + kunmap_atomic(ring); return ret; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 3939829f6c5c..86af964c2425 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, goto whole; if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) goto whole; + return 0; } /* Do not dump I/O mapped devices or special mappings */ @@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error) else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) error = -EIO; - trace_block_bio_complete(bio, error); - if (bio->bi_end_io) bio->bi_end_io(bio, error); } diff --git a/fs/block_dev.c b/fs/block_dev.c index aea605c98ba6..aae187a7f94a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev) ihold(bdev->bd_inode); return bdev; } +EXPORT_SYMBOL(bdgrab); long nr_blockdev_pages(void) { diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ecd25a1b4e51..ca9d8f1a3bb6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, if (tree_mod_dont_log(fs_info, NULL)) return 0; + __tree_mod_log_free_eb(fs_info, old_root); + ret = tree_mod_alloc(fs_info, flags, &tm); if (ret < 0) goto out; @@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) static noinline void tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, struct extent_buffer *src, unsigned long dst_offset, - unsigned long src_offset, int nr_items) + unsigned long src_offset, int nr_items, int log_removal) { int ret; int i; @@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, } for (i = 0; i < nr_items; i++) { - ret = tree_mod_log_insert_key_locked(fs_info, src, - i + src_offset, - MOD_LOG_KEY_REMOVE); - BUG_ON(ret < 0); + if (log_removal) { + ret = tree_mod_log_insert_key_locked(fs_info, src, + i + src_offset, + MOD_LOG_KEY_REMOVE); + BUG_ON(ret < 0); + } ret = tree_mod_log_insert_key_locked(fs_info, dst, i + dst_offset, MOD_LOG_KEY_ADD); @@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, buf, 1, 1); BUG_ON(ret); /* -ENOMEM */ } - tree_mod_log_free_eb(root->fs_info, buf); clean_tree_block(trans, root, buf); *last_ref = 1; } @@ -1046,6 +1049,7 @@ static noinline 
int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_set_node_ptr_generation(parent, parent_slot, trans->transid); btrfs_mark_buffer_dirty(parent); + tree_mod_log_free_eb(root->fs_info, buf); btrfs_free_tree_block(trans, root, buf, parent_start, last_ref); } @@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, goto enospc; } - tree_mod_log_free_eb(root->fs_info, root->node); tree_mod_log_set_root_pointer(root, child); rcu_assign_pointer(root->node, child); @@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans, push_items = min(src_nritems - 8, push_items); tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, - push_items); + push_items, 1); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), btrfs_node_key_ptr_offset(0), @@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, sizeof(struct btrfs_key_ptr)); tree_mod_log_eb_copy(root->fs_info, dst, src, 0, - src_nritems - push_items, push_items); + src_nritems - push_items, push_items, 1); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(src_nritems - push_items), @@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans, int mid; int ret; u32 c_nritems; + int tree_mod_log_removal = 1; c = path->nodes[level]; WARN_ON(btrfs_header_generation(c) != trans->transid); if (c == root->node) { /* trying to split the root, lets make a new one */ ret = insert_new_root(trans, root, path, level + 1); + /* + * removal of root nodes has been logged by + * tree_mod_log_set_root_pointer due to locking + */ + tree_mod_log_removal = 0; if (ret) return ret; } else { @@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans, (unsigned long)btrfs_header_chunk_tree_uuid(split), BTRFS_UUID_SIZE); - tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid); + tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid, + tree_mod_log_removal); copy_extent_buffer(split, c, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(mid), diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7d84651e850b..6d19a0a554aa 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, 0, objectid, NULL, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); + leaf = NULL; goto fail; } @@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, btrfs_tree_unlock(leaf); + return root; + fail: - if (ret) - return ERR_PTR(ret); + if (leaf) { + btrfs_tree_unlock(leaf); + free_extent_buffer(leaf); + } + kfree(root); - return root; + return ERR_PTR(ret); } static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, @@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { btrfs_free_log(NULL, root); btrfs_free_log_root_tree(NULL, fs_info); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3e074dab2d57..3d551231caba 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root, cache->bytes_super += stripe_len; ret = add_excluded_extent(root, cache->key.objectid, 
stripe_len); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; } for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { @@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root, ret = btrfs_rmap_block(&root->fs_info->mapping_tree, cache->key.objectid, bytenr, 0, &logical, &nr, &stripe_len); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; while (nr--) { cache->bytes_super += stripe_len; ret = add_excluded_extent(root, logical[nr], stripe_len); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + kfree(logical); + return ret; + } } kfree(logical); @@ -1467,8 +1472,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, if (ret && !insert) { err = -ENOENT; goto out; + } else if (ret) { + err = -EIO; + WARN_ON(1); + goto out; } - BUG_ON(ret); /* Corruption */ leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); @@ -4435,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) spin_lock(&sinfo->lock); spin_lock(&block_rsv->lock); - block_rsv->size = num_bytes; + block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + sinfo->bytes_reserved + sinfo->bytes_readonly + @@ -4790,14 +4798,49 @@ out_fail: * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers * so we can just reduce our inodes csum bytes and carry on. - * Otherwise we have to do the normal free thing to account for - * the case that the free side didn't free up its reserve - * because of this outstanding reservation. */ - if (BTRFS_I(inode)->csum_bytes == csum_bytes) + if (BTRFS_I(inode)->csum_bytes == csum_bytes) { calc_csum_metadata_size(inode, num_bytes, 0); - else - to_free = calc_csum_metadata_size(inode, num_bytes, 0); + } else { + u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; + u64 bytes; + + /* + * This is tricky, but first we need to figure out how much we + * free'd from any free-ers that occured during this + * reservation, so we reset ->csum_bytes to the csum_bytes + * before we dropped our lock, and then call the free for the + * number of bytes that were freed while we were trying our + * reservation. + */ + bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; + BTRFS_I(inode)->csum_bytes = csum_bytes; + to_free = calc_csum_metadata_size(inode, bytes, 0); + + + /* + * Now we need to see how much we would have freed had we not + * been making this reservation and our ->csum_bytes were not + * artificially inflated. + */ + BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; + bytes = csum_bytes - orig_csum_bytes; + bytes = calc_csum_metadata_size(inode, bytes, 0); + + /* + * Now reset ->csum_bytes to what it should be. If bytes is + * more than to_free then we would have free'd more space had we + * not had an artificially high ->csum_bytes, so we need to free + * the remainder. If bytes is the same or less then we don't + * need to do anything, the other free-ers did the correct + * thing. + */ + BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; + if (bytes > to_free) + to_free = bytes - to_free; + else + to_free = 0; + } spin_unlock(&BTRFS_I(inode)->lock); if (dropped) to_free += btrfs_calc_trans_metadata_size(root, dropped); @@ -7944,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root) * info has super bytes accounted for, otherwise we'll think * we have more space than we actually do. 
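The csum reservation reconciliation added earlier in this fs/btrfs/extent-tree.c diff (the block that derives to_free from csum_bytes, orig_csum_bytes and num_bytes) is easier to follow symbolically. Writing M(x) for the metadata that calc_csum_metadata_size() attributes to x bytes of pending csums (M is shorthand for this note only, not a function in the patch; the real helper is stepwise and also adjusts ->csum_bytes as a side effect), and taking C_old as the csum bytes before this reservation, C = C_old + num_bytes as the value sampled before the lock was dropped, and F as the bytes that racing free-ers removed in the meantime (so orig_csum_bytes = C - F), the failure path computes, under those assumptions:

	to_free = M(C)     - M(C - F)       /* what the free-ers actually released,
	                                       their per-call deltas telescope      */
	bytes   = M(C_old) - M(C_old - F)   /* what they would have released had
	                                       ->csum_bytes not been inflated       */
	freed here: bytes > to_free ? bytes - to_free : 0
	final ->csum_bytes: C_old - F

In other words, the failure path hands back exactly the metadata that the concurrent frees under-released because ->csum_bytes was temporarily inflated, and frees nothing extra when they already released enough.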
*/ - exclude_super_stripes(root, cache); + ret = exclude_super_stripes(root, cache); + if (ret) { + /* + * We may have excluded something, so call this just in + * case. + */ + free_excluded_extents(root, cache); + kfree(cache->free_space_ctl); + kfree(cache); + goto error; + } /* * check for two cases, either we are full, and therefore @@ -8086,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; - exclude_super_stripes(root, cache); + ret = exclude_super_stripes(root, cache); + if (ret) { + /* + * We may have excluded something, so call this just in + * case. + */ + free_excluded_extents(root, cache); + kfree(cache->free_space_ctl); + kfree(cache); + return ret; + } add_new_free_space(cache, root->fs_info, chunk_offset, chunk_offset + size); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f173c5af6461..cdee391fc7bf 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) GFP_NOFS); } +int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + struct page *page; + + while (index <= end_index) { + page = find_get_page(inode->i_mapping, index); + BUG_ON(!page); /* Pages should be in the extent_io_tree */ + clear_page_dirty_for_io(page); + page_cache_release(page); + index++; + } + return 0; +} + +int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + struct page *page; + + while (index <= end_index) { + page = find_get_page(inode->i_mapping, index); + BUG_ON(!page); /* Pages should be in the extent_io_tree */ + account_page_redirty(page); + __set_page_dirty_nobuffers(page); + page_cache_release(page); + index++; + } + return 0; +} + /* * helper function to set both pages and extents in the tree writeback */ diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6068a1985560..258c92156857 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_len); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); +int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); +int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index ec160202be3e..c4628a201cb3 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item /= csum_size; - if (csum_offset >= csums_in_item) { + if (csum_offset == csums_in_item) { ret = -EFBIG; goto fail; + } else if (csum_offset > csums_in_item) { + goto fail; } } item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); @@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, return -ENOMEM; sector_sum = sums->sums; - trans->adding_csums = 1; again: next_offset = (u64)-1; found_next = 0; @@ -899,7 +900,6 @@ next_sector: goto again; } out: - 
trans->adding_csums = 0; btrfs_free_path(path); return ret; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index af1d0605a5c1..ade03e6f7bd2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -591,6 +591,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, } compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); clear_bit(EXTENT_FLAG_PINNED, &em->flags); + clear_bit(EXTENT_FLAG_LOGGING, &flags); remove_extent_mapping(em_tree, em); if (no_splits) goto next; @@ -2141,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode, { struct inode *inode = file_inode(file); struct extent_state *cached_state = NULL; + struct btrfs_root *root = BTRFS_I(inode)->root; u64 cur_offset; u64 last_byte; u64 alloc_start; @@ -2168,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode, ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); if (ret) return ret; + if (root->fs_info->quota_enabled) { + ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start); + if (ret) + goto out_reserve_fail; + } /* * wait for ordered IO before we have any locks. We'll loop again @@ -2271,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode, &cached_state, GFP_NOFS); out: mutex_unlock(&inode->i_mutex); + if (root->fs_info->quota_enabled) + btrfs_qgroup_free(root, alloc_end - alloc_start); +out_reserve_fail: /* Let go of our reservation. */ btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d1470adca8f8..09c58a35b429 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode, int i; int will_compress; int compress_type = root->fs_info->compress_type; + int redirty = 0; /* if this is a small write inside eof, kick off a defrag */ if ((end - start + 1) < 16 * 1024 && @@ -415,6 +416,17 @@ again: if (BTRFS_I(inode)->force_compress) compress_type = BTRFS_I(inode)->force_compress; + /* + * we need to call clear_page_dirty_for_io on each + * page in the range. Otherwise applications with the file + * mmap'd can wander in and change the page contents while + * we are compressing them. + * + * If the compression fails for any reason, we set the pages + * dirty again later on. 
+ */ + extent_range_clear_dirty_for_io(inode, start, end); + redirty = 1; ret = btrfs_compress_pages(compress_type, inode->i_mapping, start, total_compressed, pages, @@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed: __set_page_dirty_nobuffers(locked_page); /* unlocked later on in the async handlers */ } + if (redirty) + extent_range_redirty_for_io(inode, start, end); add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0, BTRFS_COMPRESS_NONE); *num_added += 1; @@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sum; list_for_each_entry(sum, list, list) { + trans->adding_csums = 1; btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root->fs_info->csum_root, sum); + trans->adding_csums = 0; } return 0; } @@ -2312,6 +2328,7 @@ again: key.type = BTRFS_EXTENT_DATA_KEY; key.offset = start; + path->leave_spinning = 1; if (merge) { struct btrfs_file_extent_item *fi; u64 extent_len; @@ -2368,6 +2385,7 @@ again: btrfs_mark_buffer_dirty(leaf); inode_add_bytes(inode, len); + btrfs_release_path(path); ret = btrfs_inc_extent_ref(trans, root, new->bytenr, new->disk_len, 0, @@ -2381,6 +2399,7 @@ again: ret = 1; out_free_path: btrfs_release_path(path); + path->leave_spinning = 0; btrfs_end_transaction(trans, root); out_unlock: unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, @@ -3676,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, * 1 for the dir item * 1 for the dir index * 1 for the inode ref - * 1 for the inode ref in the tree log - * 2 for the dir entries in the log * 1 for the inode */ - trans = btrfs_start_transaction(root, 8); + trans = btrfs_start_transaction(root, 5); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; @@ -8124,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items * should cover the worst case number of items we'll modify. */ - trans = btrfs_start_transaction(root, 20); + trans = btrfs_start_transaction(root, 11); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_notrans; diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index ca52681e5f40..b81e0e9a4894 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -26,7 +26,6 @@ void btrfs_tree_lock(struct extent_buffer *eb); void btrfs_tree_unlock(struct extent_buffer *eb); -int btrfs_try_spin_lock(struct extent_buffer *eb); void btrfs_tree_read_lock(struct extent_buffer *eb); void btrfs_tree_read_unlock(struct extent_buffer *eb); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index dc08d77b717e..005c45db699e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) INIT_LIST_HEAD(&splice); INIT_LIST_HEAD(&works); + mutex_lock(&root->fs_info->ordered_operations_mutex); spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); while (!list_empty(&splice)) { @@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) cond_resched(); } + mutex_unlock(&root->fs_info->ordered_operations_mutex); } /* diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index aee4b1cc3d98..b44124dd2370 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, sgn > 0 ? 
node->seq - 1 : node->seq, &roots); if (ret < 0) - goto out; + return ret; spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; @@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, ret = 0; unlock: spin_unlock(&fs_info->qgroup_lock); -out: ulist_free(roots); ulist_free(tmp); @@ -1525,21 +1524,23 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes) if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && qg->reserved + qg->rfer + num_bytes > - qg->max_rfer) + qg->max_rfer) { ret = -EDQUOT; + goto out; + } if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && qg->reserved + qg->excl + num_bytes > - qg->max_excl) + qg->max_excl) { ret = -EDQUOT; + goto out; + } list_for_each_entry(glist, &qg->groups, next_group) { ulist_add(ulist, glist->group->qgroupid, (uintptr_t)glist->group, GFP_ATOMIC); } } - if (ret) - goto out; /* * no limits exceeded, now record the reservation into all qgroups diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 53c3501fa4ca..85e072b956d5 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); - btrfs_release_path(path); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { @@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); + btrfs_release_path(path); } else { + btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index f7a8b861058b..c85e7c6b4598 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx, found_key.type != key.type) { key.offset += right_len; break; - } else { - if (found_key.offset != key.offset + right_len) { - /* Should really not happen */ - ret = -EIO; - goto out; - } + } + if (found_key.offset != key.offset + right_len) { + ret = 0; + goto out; } key = found_key; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 9250b9c4f01e..50767bbaad6c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -625,14 +625,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); trans->block_rsv = NULL; - /* - * the same root has to be passed to start_transaction and - * end_transaction. Subvolume quota depends on this. - */ - WARN_ON(trans->root != root); if (trans->qgroup_reserved) { - btrfs_qgroup_free(root, trans->qgroup_reserved); + /* + * the same root has to be passed here between start_transaction + * and end_transaction. Subvolume quota depends on this. 
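The release just below now uses trans->root instead of the root argument, and the old WARN_ON(trans->root != root) is dropped: the amount in trans->qgroup_reserved was charged against the root the handle was started for, so it has to be returned to that same root's qgroup regardless of which root a caller hands to end_transaction. A minimal sketch of the pairing, assuming (as a simplification, not shown in this hunk) that start_transaction() records the root and the reserved bytes on the handle:

	/* start side: charge the subvolume's qgroup and remember it */
	trans->root = root;
	trans->qgroup_reserved = num_bytes;   /* after btrfs_qgroup_reserve(root, num_bytes) */

	/* end side: always release against the root that was charged */
	btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
	trans->qgroup_reserved = 0;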
+ */ + btrfs_qgroup_free(trans->root, trans->qgroup_reserved); trans->qgroup_reserved = 0; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 451fad96ecd1..ef96381569a4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, unsigned long src_ptr; unsigned long dst_ptr; int overwrite_root = 0; + bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) overwrite_root = 1; @@ -326,6 +327,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, /* look for the key in the destination tree */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); + if (ret < 0) + return ret; + if (ret == 0) { char *src_copy; char *dst_copy; @@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, return 0; } + /* + * We need to load the old nbytes into the inode so when we + * replay the extents we've logged we get the right nbytes. + */ + if (inode_item) { + struct btrfs_inode_item *item; + u64 nbytes; + + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + nbytes = btrfs_inode_nbytes(path->nodes[0], item); + item = btrfs_item_ptr(eb, slot, + struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, nbytes); + } + } else if (inode_item) { + struct btrfs_inode_item *item; + + /* + * New inode, set nbytes to 0 so that the nbytes comes out + * properly when we replay the extents. + */ + item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, 0); } insert: btrfs_release_path(path); @@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, int found_type; u64 extent_end; u64 start = key->offset; - u64 saved_nbytes; + u64 nbytes = 0; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; @@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || - found_type == BTRFS_FILE_EXTENT_PREALLOC) - extent_end = start + btrfs_file_extent_num_bytes(eb, item); - else if (found_type == BTRFS_FILE_EXTENT_INLINE) { + found_type == BTRFS_FILE_EXTENT_PREALLOC) { + nbytes = btrfs_file_extent_num_bytes(eb, item); + extent_end = start + nbytes; + + /* + * We don't add to the inodes nbytes if we are prealloc or a + * hole. 
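The nbytes rule that replay_one_extent() applies around this point can be restated as a standalone helper; the name and signature below are hypothetical, it simply mirrors the branches in the hunk:

	/*
	 * Bytes a replayed file extent contributes to the inode's nbytes:
	 * holes contribute nothing, regular and prealloc extents contribute
	 * their num_bytes, inline extents contribute their ram_bytes.
	 */
	static u64 replayed_extent_nbytes(int found_type, u64 disk_bytenr,
					  u64 num_bytes, u64 ram_bytes)
	{
		if (found_type == BTRFS_FILE_EXTENT_REG ||
		    found_type == BTRFS_FILE_EXTENT_PREALLOC)
			return disk_bytenr == 0 ? 0 : num_bytes;
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			return ram_bytes;
		return 0;
	}

Together with the overwrite_item() change above (seeding nbytes from the existing inode item, or zero for a brand new inode), replay now accumulates per-extent contributions with inode_add_bytes() instead of restoring a pre-drop snapshot, so nbytes stays correct when overlapping extents are dropped and re-inserted.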
+ */ + if (btrfs_file_extent_disk_bytenr(eb, item) == 0) + nbytes = 0; + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); + nbytes = btrfs_file_extent_ram_bytes(eb, item); extent_end = ALIGN(start + size, root->sectorsize); } else { ret = 0; @@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); BUG_ON(ret); @@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); } - inode_set_bytes(inode, saved_nbytes); + inode_add_bytes(inode, nbytes); ret = btrfs_update_inode(trans, root, inode); out: if (inode) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6b9cff42265d..2854c824ab64 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -684,6 +684,12 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) __btrfs_close_devices(fs_devices); free_fs_devices(fs_devices); } + /* + * Wait for rcu kworkers under __btrfs_close_devices + * to finish all blkdev_puts so device is really + * free when umount is done. + */ + rcu_barrier(); return ret; } @@ -4929,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, em = lookup_extent_mapping(em_tree, chunk_start, 1); read_unlock(&em_tree->lock); - BUG_ON(!em || em->start != chunk_start); + if (!em) { + printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n", + chunk_start); + return -EIO; + } + + if (em->start != chunk_start) { + printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n", + em->start, chunk_start); + free_extent_map(em); + return -EIO; + } map = (struct map_lookup *)em->bdev; length = em->len; diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index cfd1ce34e0bc..1d36db114772 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c @@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length, } } - /* mechlistMIC */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - /* Check if we have reached the end of the blob, but with - no mechListMic (e.g. 
NTLMSSP instead of KRB5) */ - if (ctx.error == ASN1_ERR_DEC_EMPTY) - goto decode_negtoken_exit; - cFYI(1, "Error decoding last part negTokenInit exit3"); - return 0; - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { - /* tag = 3 indicating mechListMIC */ - cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - - /* sequence */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit5"); - return 0; - } else if ((cls != ASN1_UNI) || (con != ASN1_CON) - || (tag != ASN1_SEQ)) { - cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - } - - /* sequence of */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit 7"); - return 0; - } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { - cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - - /* general string */ - if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { - cFYI(1, "Error decoding last part negTokenInit exit9"); - return 0; - } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) - || (tag != ASN1_GENSTR)) { - cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", - cls, con, tag, end, *end); - return 0; - } - cFYI(1, "Need to call asn1_octets_decode() function for %s", - ctx.pointer); /* is this UTF-8 or ASCII? */ -decode_negtoken_exit: + /* + * We currently ignore anything at the end of the SPNEGO blob after + * the mechTypes have been parsed, since none of that info is + * used at the moment. + */ return 1; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 1a052c0eee8e..345fc89c4286 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq; __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; #endif +/* + * Bumps refcount for cifs super block. + * Note that it should be only called if a referece to VFS super block is + * already held, e.g. in open-type syscalls context. Otherwise it can race with + * atomic_dec_and_test in deactivate_locked_super. 
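The pair of helpers defined just below is wired up later in this patch: cifs_new_fileinfo() takes the reference when an open file is created and cifsFileInfo_put() drops it when the last user goes away, so the cifs superblock cannot be torn down while any cifsFileInfo still points at it. In outline (a sketch of the call sites that appear in the fs/cifs/file.c hunks further down, not additional code):

	/* cifs_new_fileinfo(): a new open file pins the superblock */
	cifs_sb_active(inode->i_sb);

	/* cifsFileInfo_put(): dropping the last file reference releases the
	 * pin, which is when the deferred deactivate_super() may finally run */
	cifs_sb_deactive(sb);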
+ */ +void +cifs_sb_active(struct super_block *sb) +{ + struct cifs_sb_info *server = CIFS_SB(sb); + + if (atomic_inc_return(&server->active) == 1) + atomic_inc(&sb->s_active); +} + +void +cifs_sb_deactive(struct super_block *sb) +{ + struct cifs_sb_info *server = CIFS_SB(sb); + + if (atomic_dec_and_test(&server->active)) + deactivate_super(sb); +} + static int cifs_read_super(struct super_block *sb) { @@ -777,6 +801,7 @@ struct file_system_type cifs_fs_type = { .kill_sb = cifs_kill_sb, /* .fs_flags */ }; +MODULE_ALIAS_FS("cifs"); const struct inode_operations cifs_dir_inode_ops = { .create = cifs_create, .atomic_open = cifs_atomic_open, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 7163419cecd9..0e32c3446ce9 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; +/* Functions related to super block operations */ +extern void cifs_sb_active(struct super_block *sb); +extern void cifs_sb_deactive(struct super_block *sb); + /* Functions related to inodes */ extern const struct inode_operations cifs_dir_inode_ops; extern struct inode *cifs_root_iget(struct super_block *); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 991c63c6bdd0..21b3a291c327 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, } break; case Opt_blank_pass: - vol->password = NULL; - break; - case Opt_pass: /* passwords have to be handled differently * to allow the character used for deliminator * to be passed within them */ + /* + * Check if this is a case where the password + * starts with a delimiter + */ + tmp_end = strchr(data, '='); + tmp_end++; + if (!(tmp_end < end && tmp_end[1] == delim)) { + /* No it is not. Set the password to NULL */ + vol->password = NULL; + break; + } + /* Yes it is. Drop down to Opt_pass below.*/ + case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); value++; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8c0d85577314..7a0dd99e4507 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, INIT_WORK(&cfile->oplock_break, cifs_oplock_break); mutex_init(&cfile->fh_mutex); + cifs_sb_active(inode->i_sb); + /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. 
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct super_block *sb = inode->i_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; @@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) cifs_put_tlink(cifs_file->tlink); dput(cifs_file->dentry); + cifs_sb_deactive(sb); kfree(cifs_file); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0079696305c9..20887bf63121 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc != 0) { - rc = -ETXTBSY; + rc = -EBUSY; goto undo_setattr; } @@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, if (rc == -ENOENT) rc = 0; else if (rc != 0) { - rc = -ETXTBSY; + rc = -EBUSY; goto undo_rename; } cifsInode->delete_pending = true; @@ -1169,15 +1169,13 @@ psx_del_no_retry: cifs_drop_nlink(inode); } else if (rc == -ENOENT) { d_drop(dentry); - } else if (rc == -ETXTBSY) { + } else if (rc == -EBUSY) { if (server->ops->rename_pending_delete) { rc = server->ops->rename_pending_delete(full_path, dentry, xid); if (rc == 0) cifs_drop_nlink(inode); } - if (rc == -ETXTBSY) - rc = -EBUSY; } else if ((rc == -EACCES) && (dosattr == 0) && inode) { attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (attrs == NULL) { @@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, * source. Note that cross directory moves do not work with * rename by filehandle to various Windows servers. 
*/ - if (rc == 0 || rc != -ETXTBSY) + if (rc == 0 || rc != -EBUSY) goto do_rename_exit; /* open-file renames don't work across directories */ diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index a82bc51fdc82..c0b25b28be6c 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { {ERRdiffdevice, -EXDEV}, {ERRnofiles, -ENOENT}, {ERRwriteprot, -EROFS}, - {ERRbadshare, -ETXTBSY}, + {ERRbadshare, -EBUSY}, {ERRlock, -EACCES}, {ERRunsup, -EINVAL}, {ERRnosuchshare, -ENXIO}, diff --git a/fs/compat.c b/fs/compat.c index fe40fde29111..d487985dd0ea 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -558,6 +558,10 @@ ssize_t compat_rw_copy_check_uvector(int type, } *ret_pointer = iov; + ret = -EFAULT; + if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) + goto out; + /* * Single unix specification: * We should -EINVAL if an element length is not >= 0 and fitting an @@ -1080,17 +1084,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, if (!file->f_op) goto out; - ret = -EFAULT; - if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) - goto out; - - tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, + ret = compat_rw_copy_check_uvector(type, uvector, nr_segs, UIO_FASTIOV, iovstack, &iov); - if (tot_len == 0) { - ret = 0; + if (ret <= 0) goto out; - } + tot_len = ret; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; diff --git a/fs/dcache.c b/fs/dcache.c index fbfae008ba44..e8bc3420d63e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path, bool slash = false; int error = 0; - br_read_lock(&vfsmount_lock); while (dentry != root->dentry || vfsmnt != root->mnt) { struct dentry * parent; @@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path, if (!error && !slash) error = prepend(buffer, buflen, "/", 1); -out: - br_read_unlock(&vfsmount_lock); return error; global_root: @@ -2590,7 +2587,7 @@ global_root: error = prepend(buffer, buflen, "/", 1); if (!error) error = is_mounted(vfsmnt) ? 
1 : 2; - goto out; + return error; } /** @@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path, int error; prepend(&res, &buflen, "\0", 1); + br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = prepend_path(path, root, &res, &buflen); write_sequnlock(&rename_lock); + br_read_unlock(&vfsmount_lock); if (error < 0) return ERR_PTR(error); @@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path, int error; prepend(&res, &buflen, "\0", 1); + br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = prepend_path(path, &root, &res, &buflen); write_sequnlock(&rename_lock); + br_read_unlock(&vfsmount_lock); if (error > 1) error = -EINVAL; @@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); + br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = path_with_deleted(path, &root, &res, &buflen); + write_sequnlock(&rename_lock); + br_read_unlock(&vfsmount_lock); if (error < 0) res = ERR_PTR(error); - write_sequnlock(&rename_lock); path_put(&root); return res; } @@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) get_fs_root_and_pwd(current->fs, &root, &pwd); error = -ENOENT; + br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); if (!d_unlinked(pwd.dentry)) { unsigned long len; @@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) prepend(&cwd, &buflen, "\0", 1); error = prepend_path(&pwd, &root, &cwd, &buflen); write_sequnlock(&rename_lock); + br_read_unlock(&vfsmount_lock); if (error < 0) goto out; @@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) } } else { write_sequnlock(&rename_lock); + br_read_unlock(&vfsmount_lock); } out: diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 412e6eda25f8..e4141f257495 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) int rc; mutex_lock(&ecryptfs_daemon_hash_mux); - rc = try_module_get(THIS_MODULE); - if (rc == 0) { - rc = -EIO; - printk(KERN_ERR "%s: Error attempting to increment module use " - "count; rc = [%d]\n", __func__, rc); - goto out_unlock_daemon_list; - } rc = ecryptfs_find_daemon_by_euid(&daemon); if (!rc) { rc = -EINVAL; @@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) if (rc) { printk(KERN_ERR "%s: Error attempting to spawn daemon; " "rc = [%d]\n", __func__, rc); - goto out_module_put_unlock_daemon_list; + goto out_unlock_daemon_list; } mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { @@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) atomic_inc(&ecryptfs_num_miscdev_opens); out_unlock_daemon: mutex_unlock(&daemon->mux); -out_module_put_unlock_daemon_list: - if (rc) - module_put(THIS_MODULE); out_unlock_daemon_list: mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; @@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) "bug.\n", __func__, rc); BUG(); } - module_put(THIS_MODULE); return rc; } @@ -471,6 +460,7 @@ out_free: static const struct file_operations ecryptfs_miscdev_fops = { + .owner = THIS_MODULE, .open = ecryptfs_miscdev_open, .poll = ecryptfs_miscdev_poll, .read = ecryptfs_miscdev_read, diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 8f370e012e61..7cadd823bb31 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ 
-118,7 +118,6 @@ void ext2_free_inode (struct inode * inode) * as writing the quota to disk may need the lock as well. */ /* Quota is already initialized in iput() */ - ext2_xattr_delete_inode(inode); dquot_free_inode(inode); dquot_drop(inode); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index c3881e56662e..fe60cc1117d8 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -34,6 +34,7 @@ #include "ext2.h" #include "acl.h" #include "xip.h" +#include "xattr.h" static int __ext2_write_inode(struct inode *inode, int do_sync); @@ -88,6 +89,7 @@ void ext2_evict_inode(struct inode * inode) inode->i_size = 0; if (inode->i_blocks) ext2_truncate_blocks(inode, 0); + ext2_xattr_delete_inode(inode); } invalidate_inode_buffers(inode); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 1d6e2ed85322..fb5120a5505c 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -353,7 +353,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) return bdev; fail: - ext3_msg(sb, "error: failed to open journal device %s: %ld", + ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; @@ -887,7 +887,7 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) /*todo: use simple_strtoll with >32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { - ext3_msg(sb, "error: invalid sb specification: %s", + ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s", (char *) *data); return 1; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4a01ba315262..3b83cd604796 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -335,9 +335,9 @@ struct ext4_group_desc */ struct flex_groups { - atomic_t free_inodes; - atomic_t free_clusters; - atomic_t used_dirs; + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; }; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ @@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, extern int __init ext4_init_pageio(void); extern void ext4_add_complete_io(ext4_io_end_t *io_end); extern void ext4_exit_pageio(void); -extern void ext4_ioend_wait(struct inode *); +extern void ext4_ioend_shutdown(struct inode *); extern void ext4_free_io_end(ext4_io_end_t *io); extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); extern void ext4_end_io_work(struct work_struct *work); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 28dd8eeea6a9..9c6d06dcef8b 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, unsigned short ext1_ee_len, ext2_ee_len, max_len; /* - * Make sure that either both extents are uninitialized, or - * both are _not_. + * Make sure that both extents are initialized. We don't merge + * uninitialized extents so that we can be sure that end_io code has + * the extent that was written properly split out and conversion to + * initialized is trivial. 
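Writing a = ext4_ext_is_uninitialized(ex1) and b = ext4_ext_is_uninitialized(ex2), the change just below replaces the rejection test (a ^ b) with (a || b). Case by case (illustrative table only, not part of the patch):

	/*
	 *  a (ex1 uninit) | b (ex2 uninit) | old: a ^ b rejects  | new: a || b rejects
	 *  ---------------+----------------+---------------------+--------------------
	 *       no        |      no        |  no (merge allowed) |  no (merge allowed)
	 *       yes       |      yes       |  no (could merge)   |  yes
	 *       yes       |      no        |  yes                |  yes
	 *       no        |      yes       |  yes                |  yes
	 */

Only the both-uninitialized row changes: two uninitialized extents are no longer merge candidates, which is what lets the end_io conversion code rely on finding exactly the extent that was written.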
*/ - if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) + if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2)) return 0; if (ext4_ext_is_uninitialized(ex1)) @@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle, { ext4_fsblk_t newblock; ext4_lblk_t ee_block; - struct ext4_extent *ex, newex, orig_ex; + struct ext4_extent *ex, newex, orig_ex, zero_ex; struct ext4_extent *ex2 = NULL; unsigned int ee_len, depth; int err = 0; @@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle, newblock = split - ee_block + ext4_ext_pblock(ex); BUG_ON(split < ee_block || split >= (ee_block + ee_len)); + BUG_ON(!ext4_ext_is_uninitialized(ex) && + split_flag & (EXT4_EXT_MAY_ZEROOUT | + EXT4_EXT_MARK_UNINIT1 | + EXT4_EXT_MARK_UNINIT2)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -2990,12 +2996,29 @@ static int ext4_split_extent_at(handle_t *handle, err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { - if (split_flag & EXT4_EXT_DATA_VALID1) + if (split_flag & EXT4_EXT_DATA_VALID1) { err = ext4_ext_zeroout(inode, ex2); - else + zero_ex.ee_block = ex2->ee_block; + zero_ex.ee_len = cpu_to_le16( + ext4_ext_get_actual_len(ex2)); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(ex2)); + } else { err = ext4_ext_zeroout(inode, ex); - } else + zero_ex.ee_block = ex->ee_block; + zero_ex.ee_len = cpu_to_le16( + ext4_ext_get_actual_len(ex)); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(ex)); + } + } else { err = ext4_ext_zeroout(inode, &orig_ex); + zero_ex.ee_block = orig_ex.ee_block; + zero_ex.ee_len = cpu_to_le16( + ext4_ext_get_actual_len(&orig_ex)); + ext4_ext_store_pblock(&zero_ex, + ext4_ext_pblock(&orig_ex)); + } if (err) goto fix_extent_len; @@ -3003,6 +3026,12 @@ static int ext4_split_extent_at(handle_t *handle, ex->ee_len = cpu_to_le16(ee_len); ext4_ext_try_to_merge(handle, inode, path, ex); err = ext4_ext_dirty(handle, inode, path + path->p_depth); + if (err) + goto fix_extent_len; + + /* update extent status tree */ + err = ext4_es_zeroout(inode, &zero_ex); + goto out; } else if (err) goto fix_extent_len; @@ -3041,6 +3070,7 @@ static int ext4_split_extent(handle_t *handle, int err = 0; int uninitialized; int split_flag1, flags1; + int allocated = map->m_len; depth = ext_depth(inode); ex = path[depth].p_ext; @@ -3060,20 +3090,29 @@ static int ext4_split_extent(handle_t *handle, map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; + } else { + allocated = ee_len - (map->m_lblk - ee_block); } - + /* + * Update path is required because previous ext4_split_extent_at() may + * result in split of original leaf or extent zeroout. 
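Two related adjustments in this ext4_split_extent() hunk: the path is re-looked-up just below because the first ext4_split_extent_at() call may have split the leaf or zeroed out the extent, leaving the cached path and extent pointers stale; and the function now reports 'allocated', the number of requested blocks actually covered by this extent, rather than the full map->m_len. A worked example with hypothetical values:

	ee_block = 100, ee_len = 50          /* extent covers blocks 100..149 */
	map->m_lblk = 120, map->m_len = 200

	allocated = ee_len - (map->m_lblk - ee_block)
	          = 50 - (120 - 100) = 30    /* blocks 120..149 */

so the caller sees that only 30 blocks of the 200-block request were handled by this extent.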
+ */ ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) return PTR_ERR(path); + depth = ext_depth(inode); + ex = path[depth].p_ext; + uninitialized = ext4_ext_is_uninitialized(ex); + split_flag1 = 0; if (map->m_lblk >= ee_block) { - split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | - EXT4_EXT_DATA_VALID2); - if (uninitialized) + split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; + if (uninitialized) { split_flag1 |= EXT4_EXT_MARK_UNINIT1; - if (split_flag & EXT4_EXT_MARK_UNINIT2) - split_flag1 |= EXT4_EXT_MARK_UNINIT2; + split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | + EXT4_EXT_MARK_UNINIT2); + } err = ext4_split_extent_at(handle, inode, path, map->m_lblk, split_flag1, flags); if (err) @@ -3082,7 +3121,7 @@ static int ext4_split_extent(handle_t *handle, ext4_ext_show_leaf(inode, path); out: - return err ? err : map->m_len; + return err ? err : allocated; } /* @@ -3137,6 +3176,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); allocated = ee_len - (map->m_lblk - ee_block); + zero_ex.ee_len = 0; trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); @@ -3227,13 +3267,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, if (EXT4_EXT_MAY_ZEROOUT & split_flag) max_zeroout = sbi->s_extent_max_zeroout_kb >> - inode->i_sb->s_blocksize_bits; + (inode->i_sb->s_blocksize_bits - 10); /* If extent is less than s_max_zeroout_kb, zeroout directly */ if (max_zeroout && (ee_len <= max_zeroout)) { err = ext4_ext_zeroout(inode, ex); if (err) goto out; + zero_ex.ee_block = ex->ee_block; + zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); + ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -3292,6 +3335,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, err = allocated; out: + /* If we have gotten a failure, don't zero out status tree */ + if (!err) + err = ext4_es_zeroout(inode, &zero_ex); return err ? err : allocated; } @@ -3374,8 +3420,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)ee_block, ee_len); - /* If extent is larger than requested then split is required */ + /* If extent is larger than requested it is a clear sign that we still + * have some extent state machine issues left. So extent_split is still + * required. + * TODO: Once all related issues will be fixed this situation should be + * illegal. 
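The shift fix a little above in this extents.c diff (in ext4_ext_convert_to_initialized) is a units conversion: s_extent_max_zeroout_kb is in kibibytes, so the block count is kb * 1024 / blocksize, i.e. kb >> (blocksize_bits - 10). With a 32 KiB limit and 4 KiB blocks (blocksize_bits = 12), assumed here for illustration:

	old:  max_zeroout = 32 >> 12        = 0    /* zeroout path never taken */
	new:  max_zeroout = 32 >> (12 - 10) = 8    /* blocks, i.e. 32 KiB      */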
+ */ if (ee_block != map->m_lblk || ee_len > map->m_len) { +#ifdef EXT4_DEBUG + ext4_warning("Inode (%ld) finished: extent logical block %llu," + " len %u; IO logical block %llu, len %u\n", + inode->i_ino, (unsigned long long)ee_block, ee_len, + (unsigned long long)map->m_lblk, map->m_len); +#endif err = ext4_split_unwritten_extents(handle, inode, map, path, EXT4_GET_BLOCKS_CONVERT); if (err < 0) @@ -3626,6 +3683,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, path, map->m_len); } else err = ret; + map->m_flags |= EXT4_MAP_MAPPED; + if (allocated > map->m_len) + allocated = map->m_len; + map->m_len = allocated; goto out2; } /* buffered IO case */ @@ -3675,6 +3736,7 @@ out: allocated - map->m_len); allocated = map->m_len; } + map->m_len = allocated; /* * If we have done fallocate with the offset that is already @@ -4106,9 +4168,6 @@ got_allocated_blocks: } } else { BUG_ON(allocated_clusters < reserved_clusters); - /* We will claim quota for all newly allocated blocks.*/ - ext4_da_update_reserve_space(inode, allocated_clusters, - 1); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); int reservation = allocated_clusters - @@ -4159,6 +4218,15 @@ got_allocated_blocks: ei->i_reserved_data_blocks += reservation; spin_unlock(&ei->i_block_reservation_lock); } + /* + * We will claim quota for all newly allocated blocks. + * We're updating the reserved space *after* the + * correction above so we do not accidentally free + * all the metadata reservation because we might + * actually need it later on. + */ + ext4_da_update_reserve_space(inode, allocated_clusters, + 1); } } @@ -4368,8 +4436,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (len <= EXT_UNINIT_MAX_LEN << blkbits) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; - /* Prevent race condition between unwritten */ - ext4_flush_unwritten_io(inode); retry: while (ret >= 0 && ret < max_blocks) { map.m_lblk = map.m_lblk + ret; diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 95796a1b7522..fe3337a85ede 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) static int ext4_es_can_be_merged(struct extent_status *es1, struct extent_status *es2) { - if (es1->es_lblk + es1->es_len != es2->es_lblk) + if (ext4_es_status(es1) != ext4_es_status(es2)) return 0; - if (ext4_es_status(es1) != ext4_es_status(es2)) + if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL) return 0; - if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && - (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2))) + if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) return 0; - return 1; + if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && + (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) + return 1; + + if (ext4_es_is_hole(es1)) + return 1; + + /* we need to check delayed extent is without unwritten status */ + if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) + return 1; + + return 0; } static struct extent_status * @@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) return es; } +#ifdef ES_AGGRESSIVE_TEST +static void ext4_es_insert_extent_ext_check(struct inode *inode, + struct extent_status *es) +{ + struct ext4_ext_path *path = NULL; + struct ext4_extent *ex; + ext4_lblk_t ee_block; + ext4_fsblk_t ee_start; + unsigned short ee_len; + int depth, 
ee_status, es_status; + + path = ext4_ext_find_extent(inode, es->es_lblk, NULL); + if (IS_ERR(path)) + return; + + depth = ext_depth(inode); + ex = path[depth].p_ext; + + if (ex) { + + ee_block = le32_to_cpu(ex->ee_block); + ee_start = ext4_ext_pblock(ex); + ee_len = ext4_ext_get_actual_len(ex); + + ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0; + es_status = ext4_es_is_unwritten(es) ? 1 : 0; + + /* + * Make sure ex and es are not overlap when we try to insert + * a delayed/hole extent. + */ + if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { + if (in_range(es->es_lblk, ee_block, ee_len)) { + pr_warn("ES insert assertation failed for " + "inode: %lu we can find an extent " + "at block [%d/%d/%llu/%c], but we " + "want to add an delayed/hole extent " + "[%d/%d/%llu/%llx]\n", + inode->i_ino, ee_block, ee_len, + ee_start, ee_status ? 'u' : 'w', + es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + } + goto out; + } + + /* + * We don't check ee_block == es->es_lblk, etc. because es + * might be a part of whole extent, vice versa. + */ + if (es->es_lblk < ee_block || + ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { + pr_warn("ES insert assertation failed for inode: %lu " + "ex_status [%d/%d/%llu/%c] != " + "es_status [%d/%d/%llu/%c]\n", inode->i_ino, + ee_block, ee_len, ee_start, + ee_status ? 'u' : 'w', es->es_lblk, es->es_len, + ext4_es_pblock(es), es_status ? 'u' : 'w'); + goto out; + } + + if (ee_status ^ es_status) { + pr_warn("ES insert assertation failed for inode: %lu " + "ex_status [%d/%d/%llu/%c] != " + "es_status [%d/%d/%llu/%c]\n", inode->i_ino, + ee_block, ee_len, ee_start, + ee_status ? 'u' : 'w', es->es_lblk, es->es_len, + ext4_es_pblock(es), es_status ? 'u' : 'w'); + } + } else { + /* + * We can't find an extent on disk. So we need to make sure + * that we don't want to add an written/unwritten extent. + */ + if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { + pr_warn("ES insert assertation failed for inode: %lu " + "can't find an extent at block %d but we want " + "to add an written/unwritten extent " + "[%d/%d/%llu/%llx]\n", inode->i_ino, + es->es_lblk, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + } + } +out: + if (path) { + ext4_ext_drop_refs(path); + kfree(path); + } +} + +static void ext4_es_insert_extent_ind_check(struct inode *inode, + struct extent_status *es) +{ + struct ext4_map_blocks map; + int retval; + + /* + * Here we call ext4_ind_map_blocks to lookup a block mapping because + * 'Indirect' structure is defined in indirect.c. So we couldn't + * access direct/indirect tree from outside. It is too dirty to define + * this function in indirect.c file. + */ + + map.m_lblk = es->es_lblk; + map.m_len = es->es_len; + + retval = ext4_ind_map_blocks(NULL, inode, &map, 0); + if (retval > 0) { + if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { + /* + * We want to add a delayed/hole extent but this + * block has been allocated. 
+ */ + pr_warn("ES insert assertation failed for inode: %lu " + "We can find blocks but we want to add a " + "delayed/hole extent [%d/%d/%llu/%llx]\n", + inode->i_ino, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + return; + } else if (ext4_es_is_written(es)) { + if (retval != es->es_len) { + pr_warn("ES insert assertation failed for " + "inode: %lu retval %d != es_len %d\n", + inode->i_ino, retval, es->es_len); + return; + } + if (map.m_pblk != ext4_es_pblock(es)) { + pr_warn("ES insert assertation failed for " + "inode: %lu m_pblk %llu != " + "es_pblk %llu\n", + inode->i_ino, map.m_pblk, + ext4_es_pblock(es)); + return; + } + } else { + /* + * We don't need to check unwritten extent because + * indirect-based file doesn't have it. + */ + BUG_ON(1); + } + } else if (retval == 0) { + if (ext4_es_is_written(es)) { + pr_warn("ES insert assertation failed for inode: %lu " + "We can't find the block but we want to add " + "an written extent [%d/%d/%llu/%llx]\n", + inode->i_ino, es->es_lblk, es->es_len, + ext4_es_pblock(es), ext4_es_status(es)); + return; + } + } +} + +static inline void ext4_es_insert_extent_check(struct inode *inode, + struct extent_status *es) +{ + /* + * We don't need to worry about the race condition because + * caller takes i_data_sem locking. + */ + BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + ext4_es_insert_extent_ext_check(inode, es); + else + ext4_es_insert_extent_ind_check(inode, es); +} +#else +static inline void ext4_es_insert_extent_check(struct inode *inode, + struct extent_status *es) +{ +} +#endif + static int __es_insert_extent(struct inode *inode, struct extent_status *newes) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; @@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_es_store_status(&newes, status); trace_ext4_es_insert_extent(inode, &newes); + ext4_es_insert_extent_check(inode, &newes); + write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end); if (err != 0) @@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, return err; } +int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) +{ + ext4_lblk_t ee_block; + ext4_fsblk_t ee_pblock; + unsigned int ee_len; + + ee_block = le32_to_cpu(ex->ee_block); + ee_len = ext4_ext_get_actual_len(ex); + ee_pblock = ext4_ext_pblock(ex); + + if (ee_len == 0) + return 0; + + return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, + EXTENT_STATUS_WRITTEN); +} + static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = container_of(shrink, diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index f190dfe969da..d8e2d4dc311e 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -21,6 +21,12 @@ #endif /* + * With ES_AGGRESSIVE_TEST defined, the result of es caching will be + * checked with old map_block's result. 
+ */ +#define ES_AGGRESSIVE_TEST__ + +/* * These flags live in the high bits of extent_status.es_pblk */ #define EXTENT_STATUS_WRITTEN (1ULL << 63) @@ -33,6 +39,8 @@ EXTENT_STATUS_DELAYED | \ EXTENT_STATUS_HOLE) +struct ext4_extent; + struct extent_status { struct rb_node rb_node; ext4_lblk_t es_lblk; /* first logical block extent covers */ @@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es); extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es); +extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex); static inline int ext4_es_is_written(struct extent_status *es) { diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 32fd2b9075dd..6c5bb8d993fe 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -324,8 +324,8 @@ error_return: } struct orlov_stats { + __u64 free_clusters; __u32 free_inodes; - __u32 free_clusters; __u32 used_dirs; }; @@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, if (flex_size > 1) { stats->free_inodes = atomic_read(&flex_group[g].free_inodes); - stats->free_clusters = atomic_read(&flex_group[g].free_clusters); + stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); stats->used_dirs = atomic_read(&flex_group[g].used_dirs); return; } diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b505a145a593..a04183127ef0 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, blk = *i_data; if (level > 0) { ext4_lblk_t first2; - bh = sb_bread(inode->i_sb, blk); + bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); if (!bh) { - EXT4_ERROR_INODE_BLOCK(inode, blk, + EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), "Read failure"); return -EIO; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9ea0cde3fa9e..b3a5213bc73e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode) trace_ext4_evict_inode(inode); - ext4_ioend_wait(inode); - if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the @@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode) * don't use page cache. */ if (ext4_should_journal_data(inode) && - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && + inode->i_ino != EXT4_JOURNAL_INO) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; @@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode) filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); + ext4_ioend_shutdown(inode); goto no_delete; } @@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode) if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages(&inode->i_data, 0); + ext4_ioend_shutdown(inode); if (is_bad_inode(inode)) goto no_delete; @@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, return num; } +#ifdef ES_AGGRESSIVE_TEST +static void ext4_map_blocks_es_recheck(handle_t *handle, + struct inode *inode, + struct ext4_map_blocks *es_map, + struct ext4_map_blocks *map, + int flags) +{ + int retval; + + map->m_flags = 0; + /* + * There is a race window that the result is not the same. + * e.g. xfstests #223 when dioread_nolock enables. 
The reason + * is that we lookup a block mapping in extent status tree with + * out taking i_data_sem. So at the time the unwritten extent + * could be converted. + */ + if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) + down_read((&EXT4_I(inode)->i_data_sem)); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { + retval = ext4_ext_map_blocks(handle, inode, map, flags & + EXT4_GET_BLOCKS_KEEP_SIZE); + } else { + retval = ext4_ind_map_blocks(handle, inode, map, flags & + EXT4_GET_BLOCKS_KEEP_SIZE); + } + if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) + up_read((&EXT4_I(inode)->i_data_sem)); + /* + * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag + * because it shouldn't be marked in es_map->m_flags. + */ + map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY); + + /* + * We don't check m_len because extent will be collpased in status + * tree. So the m_len might not equal. + */ + if (es_map->m_lblk != map->m_lblk || + es_map->m_flags != map->m_flags || + es_map->m_pblk != map->m_pblk) { + printk("ES cache assertation failed for inode: %lu " + "es_cached ex [%d/%d/%llu/%x] != " + "found ex [%d/%d/%llu/%x] retval %d flags %x\n", + inode->i_ino, es_map->m_lblk, es_map->m_len, + es_map->m_pblk, es_map->m_flags, map->m_lblk, + map->m_len, map->m_pblk, map->m_flags, + retval, flags); + } +} +#endif /* ES_AGGRESSIVE_TEST */ + /* * The ext4_map_blocks() function tries to look up the requested blocks, * and returns if the blocks are already mapped. @@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, { struct extent_status es; int retval; +#ifdef ES_AGGRESSIVE_TEST + struct ext4_map_blocks orig_map; + + memcpy(&orig_map, map, sizeof(*map)); +#endif map->m_flags = 0; ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," @@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, } else { BUG_ON(1); } +#ifdef ES_AGGRESSIVE_TEST + ext4_map_blocks_es_recheck(handle, inode, map, + &orig_map, flags); +#endif goto found; } @@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (lookup)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && @@ -643,6 +714,24 @@ found: int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (allocation)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + + /* + * If the extent has been zeroed out, we don't need to update + * extent status tree. + */ + if ((flags & EXT4_GET_BLOCKS_PRE_IO) && + ext4_es_lookup_extent(inode, map->m_lblk, &es)) { + if (ext4_es_is_written(&es)) + goto has_zeroout; + } status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && @@ -655,6 +744,7 @@ found: retval = ret; } +has_zeroout: up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { int ret = check_block_validity(inode, map); @@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(struct file *file, } /* + * Reserve a metadata for a single block located at lblock + */ +static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) +{ + int retries = 0; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + unsigned int md_needed; + ext4_lblk_t save_last_lblock; + int save_len; + + /* + * recalculate the amount of metadata blocks to reserve + * in order to allocate nrblocks + * worse case is one extent per block + */ +repeat: + spin_lock(&ei->i_block_reservation_lock); + /* + * ext4_calc_metadata_amount() has side effects, which we have + * to be prepared undo if we fail to claim space. + */ + save_len = ei->i_da_metadata_calc_len; + save_last_lblock = ei->i_da_metadata_calc_last_lblock; + md_needed = EXT4_NUM_B2C(sbi, + ext4_calc_metadata_amount(inode, lblock)); + trace_ext4_da_reserve_space(inode, md_needed); + + /* + * We do still charge estimated metadata to the sb though; + * we cannot afford to run out of free blocks. + */ + if (ext4_claim_free_clusters(sbi, md_needed, 0)) { + ei->i_da_metadata_calc_len = save_len; + ei->i_da_metadata_calc_last_lblock = save_last_lblock; + spin_unlock(&ei->i_block_reservation_lock); + if (ext4_should_retry_alloc(inode->i_sb, &retries)) { + cond_resched(); + goto repeat; + } + return -ENOSPC; + } + ei->i_reserved_meta_blocks += md_needed; + spin_unlock(&ei->i_block_reservation_lock); + + return 0; /* success */ +} + +/* * Reserve a single cluster located at lblock */ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) @@ -1263,7 +1402,7 @@ repeat: ei->i_da_metadata_calc_last_lblock = save_last_lblock; spin_unlock(&ei->i_block_reservation_lock); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - yield(); + cond_resched(); goto repeat; } dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); @@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, struct extent_status es; int retval; sector_t invalid_block = ~((sector_t) 0xffff); +#ifdef ES_AGGRESSIVE_TEST + struct ext4_map_blocks orig_map; + + memcpy(&orig_map, map, sizeof(*map)); +#endif if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) invalid_block = ~0; @@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, else BUG_ON(1); +#ifdef ES_AGGRESSIVE_TEST + ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); +#endif return retval; } @@ -1843,8 +1990,11 @@ add_delayed: * XXX: __block_prepare_write() unmaps passed block, * is it OK? */ - /* If the block was allocated from previously allocated cluster, - * then we dont need to reserve it again. */ + /* + * If the block was allocated from previously allocated cluster, + * then we don't need to reserve it again. However we still need + * to reserve metadata for every block we're going to write. 
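The new ext4_da_reserve_metadata() above follows the same shape as ext4_da_reserve_space(): snapshot the estimator state under the reservation lock, try to claim clusters, and put the snapshot back before retrying if the claim fails, because ext4_calc_metadata_amount() has side effects. A small standalone toy of that save/claim/rollback loop; all names and numbers here are invented:

#include <stdio.h>
#include <stdbool.h>

static long free_clusters = 1;   /* pretend pool of free clusters */
static int calc_len;             /* cached estimator state, mutated by the estimate */

static bool claim(long want)
{
	if (free_clusters < want)
		return false;
	free_clusters -= want;
	return true;
}

static int reserve(long want, int max_retries)
{
	int retries = 0, save_len;

repeat:
	save_len = calc_len;         /* snapshot before the estimate */
	calc_len += 1;               /* the estimate has side effects */
	if (!claim(want)) {
		calc_len = save_len; /* roll back, as the ext4 helpers do */
		if (retries++ < max_retries)
			goto repeat;
		return -1;           /* stands in for -ENOSPC */
	}
	return 0;
}

int main(void)
{
	printf("first:  %d\n", reserve(1, 2));   /* 0: the claim succeeds */
	printf("second: %d\n", reserve(1, 2));   /* -1 after retries, pool is empty */
	return 0;
}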
+ */ if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { ret = ext4_da_reserve_space(inode, iblock); if (ret) { @@ -1852,6 +2002,13 @@ add_delayed: retval = ret; goto out_unlock; } + } else { + ret = ext4_da_reserve_metadata(inode, iblock); + if (ret) { + /* not enough space to reserve */ + retval = ret; + goto out_unlock; + } } ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, @@ -1873,6 +2030,15 @@ add_delayed: int ret; unsigned long long status; +#ifdef ES_AGGRESSIVE_TEST + if (retval != map->m_len) { + printk("ES len assertation failed for inode: %lu " + "retval %d != map->m_len %d " + "in %s (lookup)\n", inode->i_ino, retval, + map->m_len, __func__); + } +#endif + status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, @@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait) trace_ext4_releasepage(page); - WARN_ON(PageChecked(page)); - if (!page_has_buffers(page)) + /* Page has dirty journalled data -> cannot release */ + if (PageChecked(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7bb713a46fe4..ee6614bdb639 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, ac->ac_b_ex.fe_group); - atomic_sub(ac->ac_b_ex.fe_len, - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_sub(ac->ac_b_ex.fe_len, + &sbi->s_flex_groups[flex_group].free_clusters); } err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); @@ -3692,11 +3692,7 @@ repeat: if (free < needed && busy) { busy = 0; ext4_unlock_group(sb, group); - /* - * Yield the CPU here so that we don't get soft lockup - * in non preempt case. 
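Both here and in the mballoc hunks further down, yield() gives way to cond_resched(), presumably because cond_resched() only drops the CPU when the scheduler has other runnable work while still preventing the non-preemptible soft lockups the deleted comment worried about. A kernel-context sketch of the resulting retry shape, not a standalone program; try_claim() is a hypothetical stand-in for ext4_claim_free_clusters() and friends:

static int claim_with_retries(struct super_block *sb)
{
	int retries = 0;

	while (try_claim(sb) != 0) {
		if (!ext4_should_retry_alloc(sb, &retries))
			return -ENOSPC;
		cond_resched();      /* was: yield() */
	}
	return 0;
}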
- */ - yield(); + cond_resched(); goto repeat; } @@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { /* let others to free the space */ - yield(); + cond_resched(); ar->len = ar->len >> 1; } if (!ar->len) { @@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bitmap_bh = NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; - unsigned long freed = 0; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; @@ -4666,14 +4661,12 @@ do_more: if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic_add(count_clusters, - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(count_clusters, + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); - freed += count; - if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); @@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 4e81d47aa8cb..33e1c086858b 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -32,16 +32,18 @@ */ static inline int get_ext_path(struct inode *inode, ext4_lblk_t lblock, - struct ext4_ext_path **path) + struct ext4_ext_path **orig_path) { int ret = 0; + struct ext4_ext_path *path; - *path = ext4_ext_find_extent(inode, lblock, *path); - if (IS_ERR(*path)) { - ret = PTR_ERR(*path); - *path = NULL; - } else if ((*path)[ext_depth(inode)].p_ext == NULL) + path = ext4_ext_find_extent(inode, lblock, *orig_path); + if (IS_ERR(path)) + ret = PTR_ERR(path); + else if (path[ext_depth(inode)].p_ext == NULL) ret = -ENODATA; + else + *orig_path = path; return ret; } @@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count, { struct ext4_ext_path *path = NULL; struct ext4_extent *ext; + int ret = 0; ext4_lblk_t last = from + count; while (from < last) { *err = get_ext_path(inode, from, &path); if (*err) - return 0; + goto out; ext = path[ext_depth(inode)].p_ext; - if (!ext) { - ext4_ext_drop_refs(path); - return 0; - } - if (uninit != ext4_ext_is_uninitialized(ext)) { - ext4_ext_drop_refs(path); - return 0; - } + if (uninit != ext4_ext_is_uninitialized(ext)) + goto out; from += ext4_ext_get_actual_len(ext); ext4_ext_drop_refs(path); } - return 1; + ret = 1; +out: + if (path) { + ext4_ext_drop_refs(path); + kfree(path); + } + return ret; } /** @@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, int replaced_count = 0; int dext_alen; + *err = ext4_es_remove_extent(orig_inode, from, count); + if (*err) + goto out; + + *err = ext4_es_remove_extent(donor_inode, from, count); + if (*err) + goto out; + /* Get the original extent for the block "orig_off" */ *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 809b31003ecc..047a6de04a0a 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -50,11 +50,21 @@ void ext4_exit_pageio(void) kmem_cache_destroy(io_page_cachep); } -void ext4_ioend_wait(struct 
inode *inode) +/* + * This function is called by ext4_evict_inode() to make sure there is + * no more pending I/O completion work left to do. + */ +void ext4_ioend_shutdown(struct inode *inode) { wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); + /* + * We need to make sure the work structure is finished being + * used before we let the inode get destroyed. + */ + if (work_pending(&EXT4_I(inode)->i_unwritten_work)) + cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); } static void put_io_page(struct ext4_io_page *io_page) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index b2c8ee56eb98..c169477a62c9 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb, sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, group_data[0].group); - atomic_add(EXT4_NUM_B2C(sbi, free_blocks), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, &sbi->s_flex_groups[flex_group].free_inodes); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 34e855219231..5d6d53578124 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -91,6 +91,7 @@ static struct file_system_type ext2_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext2"); +MODULE_ALIAS("ext2"); #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) #else #define IS_EXT2_SB(sb) (0) @@ -106,6 +107,7 @@ static struct file_system_type ext3_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); +MODULE_ALIAS("ext3"); #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) #else #define IS_EXT3_SB(sb) (0) @@ -1925,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb) flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); - atomic_add(ext4_free_group_clusters(sb, gdp), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic64_add(ext4_free_group_clusters(sb, gdp), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index 455074308069..e37eb274e492 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c @@ -258,6 +258,7 @@ static struct file_system_type vxfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */ +MODULE_ALIAS("vxfs"); static int __init vxfs_init(void) diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 019f45e45097..d79c2dadc536 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) cmd = F_SETLK; fl->fl_type = F_UNLCK; } - if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) + if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { + if (fl->fl_type == F_UNLCK) + posix_lock_file_wait(file, fl); return -EIO; + } if (IS_GETLK(cmd)) return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); else if (fl->fl_type == F_UNLCK) diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 156e42ec84ea..5c29216e9cc1 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -588,6 +588,7 @@ struct lm_lockstruct { struct dlm_lksb ls_control_lksb; /* control_lock */ char ls_control_lvb[GDLM_LVB_SIZE]; /* 
control_lock lvb */ struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ + char *ls_lvb_bits; spinlock_t ls_recover_spin; /* protects following fields */ unsigned long ls_recover_flags; /* DFL_ */ diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 9802de0f85e6..c8423d6de6c3 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, static int all_jid_bits_clear(char *lvb) { - int i; - for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) { - if (lvb[i]) - return 0; - } - return 1; + return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, + GDLM_LVB_SIZE - JID_BITMAP_OFFSET); } static void sync_wait_cb(void *arg) @@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work) { struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); struct lm_lockstruct *ls = &sdp->sd_lockstruct; - char lvb_bits[GDLM_LVB_SIZE]; uint32_t block_gen, start_gen, lvb_gen, flags; int recover_set = 0; int write_lvb = 0; @@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work) return; } - control_lvb_read(ls, &lvb_gen, lvb_bits); + control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); spin_lock(&ls->ls_recover_spin); if (block_gen != ls->ls_recover_block || @@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work) ls->ls_recover_result[i] = 0; - if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) + if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) continue; - __clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); + __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); write_lvb = 1; } } @@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work) continue; if (ls->ls_recover_submit[i] < start_gen) { ls->ls_recover_submit[i] = 0; - __set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); + __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); } } /* even if there are no bits to set, we need to write the @@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work) spin_unlock(&ls->ls_recover_spin); if (write_lvb) { - control_lvb_write(ls, start_gen, lvb_bits); + control_lvb_write(ls, start_gen, ls->ls_lvb_bits); flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; } else { flags = DLM_LKF_CONVERT; @@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work) */ for (i = 0; i < recover_size; i++) { - if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) { + if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { fs_info(sdp, "recover generation %u jid %d\n", start_gen, i); gfs2_recover_set(sdp, i); @@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work) static int control_mount(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; - char lvb_bits[GDLM_LVB_SIZE]; uint32_t start_gen, block_gen, mount_gen, lvb_gen; int mounted_mode; int retries = 0; @@ -857,7 +851,7 @@ locks_done: * lvb_gen will be non-zero. */ - control_lvb_read(ls, &lvb_gen, lvb_bits); + control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); if (lvb_gen == 0xFFFFFFFF) { /* special value to force mount attempts to fail */ @@ -887,7 +881,7 @@ locks_done: * and all lvb bits to be clear (no pending journal recoveries.) 
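all_jid_bits_clear() above is rewritten on top of memchr_inv(), the kernel helper that returns a pointer to the first byte differing from the given value, or NULL when the whole range matches; the gfs2 code only cares about the NULL case. A standalone user-space stand-in that shows the semantics being relied on:

#include <stdio.h>
#include <stddef.h>

/* Same contract as the kernel's memchr_inv(): first mismatching byte, or
 * NULL if every byte equals c. */
static const void *memchr_inv_demo(const void *start, int c, size_t bytes)
{
	const unsigned char *p = start;
	size_t i;

	for (i = 0; i < bytes; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

int main(void)
{
	unsigned char lvb[8] = { 0 };

	printf("all clear: %s\n", memchr_inv_demo(lvb, 0, sizeof(lvb)) ? "no" : "yes");
	lvb[5] = 1;
	printf("all clear: %s\n", memchr_inv_demo(lvb, 0, sizeof(lvb)) ? "no" : "yes");
	return 0;
}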
*/ - if (!all_jid_bits_clear(lvb_bits)) { + if (!all_jid_bits_clear(ls->ls_lvb_bits)) { /* journals need recovery, wait until all are clear */ fs_info(sdp, "control_mount wait for journal recovery\n"); goto restart; @@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word) static int control_first_done(struct gfs2_sbd *sdp) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; - char lvb_bits[GDLM_LVB_SIZE]; uint32_t start_gen, block_gen; int error; @@ -991,8 +984,8 @@ restart: memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); spin_unlock(&ls->ls_recover_spin); - memset(lvb_bits, 0, sizeof(lvb_bits)); - control_lvb_write(ls, start_gen, lvb_bits); + memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); + control_lvb_write(ls, start_gen, ls->ls_lvb_bits); error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); if (error) @@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, uint32_t old_size, new_size; int i, max_jid; + if (!ls->ls_lvb_bits) { + ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); + if (!ls->ls_lvb_bits) + return -ENOMEM; + } + max_jid = 0; for (i = 0; i < num_slots; i++) { if (max_jid < slots[i].slot - 1) @@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, static void free_recover_size(struct lm_lockstruct *ls) { + kfree(ls->ls_lvb_bits); kfree(ls->ls_recover_submit); kfree(ls->ls_recover_result); ls->ls_recover_submit = NULL; @@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) ls->ls_recover_size = 0; ls->ls_recover_submit = NULL; ls->ls_recover_result = NULL; + ls->ls_lvb_bits = NULL; error = set_recover_size(sdp, NULL, 0); if (error) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index d1f51fd73f86..5a51265a4341 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) RB_CLEAR_NODE(&ip->i_res->rs_node); out: up_write(&ip->i_rw_mutex); - return 0; + return error; } static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) @@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) { struct super_block *sb = sdp->sd_vfs; - struct block_device *bdev = sb->s_bdev; - const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / - bdev_logical_block_size(sb->s_bdev); u64 blk; sector_t start = 0; - sector_t nr_sects = 0; + sector_t nr_blks = 0; int rv; unsigned int x; u32 trimmed = 0; @@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, if (diff == 0) continue; blk = offset + ((bi->bi_start + x) * GFS2_NBBY); - blk *= sects_per_blk; /* convert to sectors */ while(diff) { if (diff & 1) { - if (nr_sects == 0) + if (nr_blks == 0) goto start_new_extent; - if ((start + nr_sects) != blk) { - if (nr_sects >= minlen) { - rv = blkdev_issue_discard(bdev, - start, nr_sects, + if ((start + nr_blks) != blk) { + if (nr_blks >= minlen) { + rv = sb_issue_discard(sb, + start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; - trimmed += nr_sects; + trimmed += nr_blks; } - nr_sects = 0; + nr_blks = 0; start_new_extent: start = blk; } - nr_sects += sects_per_blk; + nr_blks++; } diff >>= 2; - blk += sects_per_blk; + blk++; } } - if (nr_sects >= minlen) { - rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); + if (nr_blks >= minlen) { + rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); if (rv) goto fail; - trimmed += nr_sects; + trimmed += nr_blks; } if (ptrimmed) *ptrimmed = 
trimmed; diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index a94f0f779d5e..fe0a76213d9e 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode) struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; - u32 size = inode->i_size; + loff_t size = inode->i_size; res = pagecache_write_begin(NULL, mapping, size, 0, AOP_FLAG_UNINTERRUPTIBLE, diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index fbabb906066f..0f6e52d22b84 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -845,15 +845,8 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr) return err; if ((attr->ia_valid & ATTR_SIZE) && - attr->ia_size != i_size_read(inode)) { - int error; - - error = inode_newsize_ok(inode, attr->ia_size); - if (error) - return error; - + attr->ia_size != i_size_read(inode)) truncate_setsize(inode, attr->ia_size); - } setattr_copy(inode, attr); mark_inode_dirty(inode); @@ -993,6 +986,7 @@ static struct file_system_type hostfs_type = { .kill_sb = hostfs_kill_sb, .fs_flags = 0, }; +MODULE_ALIAS_FS("hostfs"); static int __init init_hostfs(void) { diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index a3076228523d..a0617e706957 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -688,6 +688,7 @@ static struct file_system_type hpfs_fs_type = { .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("hpfs"); static int __init init_hpfs_fs(void) { diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 84e3d856e91d..523464e62849 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) * way when do_mmap_pgoff unwinds (may be important on powerpc * and ia64). */ - vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; vma->vm_ops = &hugetlb_vm_ops; if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) diff --git a/fs/inode.c b/fs/inode.c index f5f7c06c36fb..a898b3d43ccf 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan) * inode to the back of the list so we don't spin on it. 
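The hfsplus_file_truncate() hunk above widens the local copy of i_size from u32 to loff_t: for files over 4 GiB a 32-bit copy silently wraps, and the wrapped value is what gets handed to pagecache_write_begin(). A quick standalone demonstration of the wrap; the file size is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t i_size = 6LL * 1024 * 1024 * 1024;   /* a 6 GiB file */
	uint32_t as_u32 = (uint32_t)i_size;          /* old "u32 size = inode->i_size;" */
	int64_t as_loff = i_size;                    /* new "loff_t size" keeps it intact */

	printf("i_size      = %lld\n", (long long)i_size);
	printf("u32 copy    = %u (wrapped)\n", as_u32);
	printf("loff_t copy = %lld\n", (long long)as_loff);
	return 0;
}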
*/ if (!spin_trylock(&inode->i_lock)) { - list_move_tail(&inode->i_lru, &sb->s_inode_lru); + list_move(&inode->i_lru, &sb->s_inode_lru); continue; } diff --git a/fs/internal.h b/fs/internal.h index 507141fceb99..4be78237d896 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool); * dcache.c */ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); + +/* + * read_write.c + */ +extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index a67f16e846a2..d9b8aebdeb22 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1557,6 +1557,7 @@ static struct file_system_type iso9660_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("iso9660"); +MODULE_ALIAS("iso9660"); static int __init init_iso9660_fs(void) { diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index d6ee5aed56b1..325bc019ed88 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1065,9 +1065,12 @@ out: void jbd2_journal_set_triggers(struct buffer_head *bh, struct jbd2_buffer_trigger_type *type) { - struct journal_head *jh = bh2jh(bh); + struct journal_head *jh = jbd2_journal_grab_journal_head(bh); + if (WARN_ON(!jh)) + return; jh->b_triggers = type; + jbd2_journal_put_journal_head(jh); } void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, @@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; - struct journal_head *jh = bh2jh(bh); + struct journal_head *jh; int ret = 0; - jbd_debug(5, "journal_head %p\n", jh); - JBUFFER_TRACE(jh, "entry"); if (is_handle_aborted(handle)) goto out; - if (!buffer_jbd(bh)) { + jh = jbd2_journal_grab_journal_head(bh); + if (!jh) { ret = -EUCLEAN; goto out; } + jbd_debug(5, "journal_head %p\n", jh); + JBUFFER_TRACE(jh, "entry"); jbd_lock_bh_state(bh); @@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); + jbd2_journal_put_journal_head(jh); out: JBUFFER_TRACE(jh, "exit"); WARN_ON(ret); /* All errors are bugs, so dump the stack */ diff --git a/fs/namespace.c b/fs/namespace.c index 50ca17d3cb45..341d3f564082 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; + /* Don't allow unprivileged users to change mount flags */ + if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) + mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; + atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); @@ -1686,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name, if (IS_ERR(mnt)) { err = PTR_ERR(mnt); - goto out; + goto out2; } err = graft_tree(mnt, path); @@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags) if (readonly_request == __mnt_is_readonly(mnt)) return 0; + if (mnt->mnt_flags & MNT_LOCK_READONLY) + return -EPERM; + if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else @@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, /* First pass: copy the tree topology */ copy_flags = CL_COPY_ALL | CL_EXPIRE; if (user_ns != mnt_ns->user_ns) - copy_flags |= 
CL_SHARED_TO_SLAVE; + copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { up_write(&namespace_sem); @@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt) return check_mnt(real_mount(mnt)); } +bool current_chrooted(void) +{ + /* Does the current process have a non-standard root */ + struct path ns_root; + struct path fs_root; + bool chrooted; + + /* Find the namespace root */ + ns_root.mnt = ¤t->nsproxy->mnt_ns->root->mnt; + ns_root.dentry = ns_root.mnt->mnt_root; + path_get(&ns_root); + while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) + ; + + get_fs_root(current->fs, &fs_root); + + chrooted = !path_equal(&fs_root, &ns_root); + + path_put(&fs_root); + path_put(&ns_root); + + return chrooted; +} + +void update_mnt_policy(struct user_namespace *userns) +{ + struct mnt_namespace *ns = current->nsproxy->mnt_ns; + struct mount *mnt; + + down_read(&namespace_sem); + list_for_each_entry(mnt, &ns->list, mnt_list) { + switch (mnt->mnt.mnt_sb->s_magic) { + case SYSFS_MAGIC: + userns->may_mount_sysfs = true; + break; + case PROC_SUPER_MAGIC: + userns->may_mount_proc = true; + break; + } + if (userns->may_mount_sysfs && userns->may_mount_proc) + break; + } + up_read(&namespace_sem); +} + static void *mntns_get(struct task_struct *task) { struct mnt_namespace *ns = NULL; diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 737d839bc17b..6fc7b5cae92b 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev) bl_pipe_msg.bl_wq = &nn->bl_wq; memset(msg, 0, sizeof(*msg)); - msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); + msg->len = sizeof(bl_msg) + bl_msg.totallen; + msg->data = kzalloc(msg->len, GFP_NOFS); if (!msg->data) goto out; @@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev) memcpy(msg->data, &bl_msg, sizeof(bl_msg)); dataptr = (uint8_t *) msg->data; memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); - msg->len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&nn->bl_wq, &wq); if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index dc0f98dfa717..c516da5873fd 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -726,9 +726,9 @@ out1: return ret; } -static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) +static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen) { - return key_instantiate_and_link(key, data, strlen(data) + 1, + return key_instantiate_and_link(key, data, datalen, id_resolver_cache->thread_keyring, authkey); } @@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, struct key *key, struct key *authkey) { char id_str[NFS_UINT_MAXLEN]; + size_t len; int ret = -ENOKEY; /* ret = -ENOKEY */ @@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, case IDMAP_CONV_NAMETOID: if (strcmp(upcall->im_name, im->im_name) != 0) break; - sprintf(id_str, "%d", im->im_id); - ret = nfs_idmap_instantiate(key, authkey, id_str); + /* Note: here we store the NUL terminator too */ + len = sprintf(id_str, "%d", im->im_id) + 1; + ret = nfs_idmap_instantiate(key, authkey, id_str, len); break; case IDMAP_CONV_IDTONAME: if (upcall->im_id != im->im_id) break; - ret = nfs_idmap_instantiate(key, authkey, im->im_name); + len = strlen(im->im_name); + ret 
= nfs_idmap_instantiate(key, authkey, im->im_name, len); break; default: ret = -EINVAL; diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index ac4fc9a8fdbc..66b6664dcd4c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *n, *prev = NULL; + struct nfs_client *pos, *prev = NULL; struct nfs4_setclientid_res clid = { .clientid = new->cl_clientid, .confirm = new->cl_confirm, @@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new, int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos" */ - if (pos->cl_cons_state < NFS_CS_READY) + if (pos->cl_cons_state > NFS_CS_READY) { + atomic_inc(&pos->cl_count); + spin_unlock(&nn->nfs_client_lock); + + if (prev) + nfs_put_client(prev); + prev = pos; + + status = nfs_wait_client_init_complete(pos); + spin_lock(&nn->nfs_client_lock); + if (status < 0) + continue; + } + if (pos->cl_cons_state != NFS_CS_READY) continue; if (pos->rpc_ops != new->rpc_ops) @@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *n, *prev = NULL; + struct nfs_client *pos, *prev = NULL; int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos", especially the client * ID and serverowner fields. Wait for CREATE_SESSION * to finish. */ - if (pos->cl_cons_state < NFS_CS_READY) { + if (pos->cl_cons_state > NFS_CS_READY) { atomic_inc(&pos->cl_count); spin_unlock(&nn->nfs_client_lock); @@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new, nfs_put_client(prev); prev = pos; - nfs4_schedule_lease_recovery(pos); status = nfs_wait_client_init_complete(pos); - if (status < 0) { - nfs_put_client(pos); - spin_lock(&nn->nfs_client_lock); - continue; + if (status == 0) { + nfs4_schedule_lease_recovery(pos); + status = nfs4_wait_clnt_recover(pos); } - status = pos->cl_cons_state; spin_lock(&nn->nfs_client_lock); if (status < 0) continue; } + if (pos->cl_cons_state != NFS_CS_READY) + continue; if (pos->rpc_ops != new->rpc_ops) continue; @@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new, continue; atomic_inc(&pos->cl_count); - spin_unlock(&nn->nfs_client_lock); + *result = pos; + status = 0; dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", __func__, pos, atomic_read(&pos->cl_count)); - - *result = pos; - return 0; + break; } /* No matching nfs_client found. 
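The reworked walks in nfs40_walk_client_list() and nfs41_walk_client_list() above lean on one pattern: before dropping the spinlock to sleep, pin the current entry with a reference so it stays valid and stays on the list, then resume the walk from it after relocking, releasing the previously pinned entry along the way. A kernel-context sketch of that shape, not standalone; the list, lock and get/put helpers are stand-ins for the nfs_client ones:

	spin_lock(&list_lock);
	list_for_each_entry(pos, &entries, link) {
		if (!entry_is_ready(pos)) {
			get_entry(pos);              /* atomic_inc(&pos->cl_count) in the patch */
			spin_unlock(&list_lock);
			if (prev)
				put_entry(prev);     /* drop the previous pin */
			prev = pos;
			wait_for_entry_ready(pos);   /* may sleep */
			spin_lock(&list_lock);       /* pos is still valid: we hold a reference */
		}
		/* ... examine pos under the lock ... */
	}
	spin_unlock(&list_lock);
	if (prev)
		put_entry(prev);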
*/ spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s status = %d\n", __func__, status); + if (prev) + nfs_put_client(prev); return status; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 49eeb044c109..4fb234d3aefb 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo) { if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) return; - clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags); pnfs_return_layout(inode); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b2671cb0f901..0ad025eb523b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) /* Save the delegation */ nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); + nfs_release_seqid(opendata->o_arg.seqid); ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) goto out; @@ -2632,7 +2633,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, int status; if (pnfs_ld_layoutret_on_setattr(inode)) - pnfs_return_layout(inode); + pnfs_commit_and_return_layout(inode); nfs_fattr_init(fattr); @@ -6416,22 +6417,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) static void nfs4_layoutcommit_release(void *calldata) { struct nfs4_layoutcommit_data *data = calldata; - struct pnfs_layout_segment *lseg, *tmp; - unsigned long *bitlock = &NFS_I(data->args.inode)->flags; pnfs_cleanup_layoutcommit(data); - /* Matched by references in pnfs_set_layoutcommit */ - list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { - list_del_init(&lseg->pls_lc_list); - if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, - &lseg->pls_flags)) - pnfs_put_lseg(lseg); - } - - clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); - smp_mb__after_clear_bit(); - wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); - put_rpccred(data->cred); kfree(data); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6ace365c6334..d41a3518509f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1886,7 +1886,13 @@ again: status = PTR_ERR(clnt); break; } - clp->cl_rpcclient = clnt; + /* Note: this is safe because we haven't yet marked the + * client as ready, so we are the only user of + * clp->cl_rpcclient + */ + clnt = xchg(&clp->cl_rpcclient, clnt); + rpc_shutdown_client(clnt); + clnt = clp->cl_rpcclient; goto again; case -NFS4ERR_MINOR_VERS_MISMATCH: diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 48ac5aad6258..4bdffe0ba025 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range, lo_seg_intersecting(lseg_range, recall_range); } +static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, + struct list_head *tmp_list) +{ + if (!atomic_dec_and_test(&lseg->pls_refcount)) + return false; + pnfs_layout_remove_lseg(lseg->pls_layout, lseg); + list_add(&lseg->pls_list, tmp_list); + return true; +} + /* Returns 1 if lseg is removed from list, 0 otherwise */ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, struct list_head *tmp_list) @@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, */ dprintk("%s: lseg %p ref %d\n", __func__, lseg, atomic_read(&lseg->pls_refcount)); - if (atomic_dec_and_test(&lseg->pls_refcount)) { - pnfs_layout_remove_lseg(lseg->pls_layout, lseg); - list_add(&lseg->pls_list, 
tmp_list); + if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list)) rv = 1; - } } return rv; } @@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo, return lseg; } +static void pnfs_clear_layoutcommit(struct inode *inode, + struct list_head *head) +{ + struct nfs_inode *nfsi = NFS_I(inode); + struct pnfs_layout_segment *lseg, *tmp; + + if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) + return; + list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) { + if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) + continue; + pnfs_lseg_dec_and_remove_zero(lseg, head); + } +} + /* * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr * when the layout segment list is empty. @@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino) /* Reference matched in nfs4_layoutreturn_release */ pnfs_get_layout_hdr(lo); empty = list_empty(&lo->plh_segs); + pnfs_clear_layoutcommit(ino, &tmp_list); pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); /* Don't send a LAYOUTRETURN if list was initially empty */ if (empty) { @@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino) spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); - WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)); - lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); if (unlikely(lrp == NULL)) { status = -ENOMEM; @@ -845,6 +866,33 @@ out: } EXPORT_SYMBOL_GPL(_pnfs_return_layout); +int +pnfs_commit_and_return_layout(struct inode *inode) +{ + struct pnfs_layout_hdr *lo; + int ret; + + spin_lock(&inode->i_lock); + lo = NFS_I(inode)->layout; + if (lo == NULL) { + spin_unlock(&inode->i_lock); + return 0; + } + pnfs_get_layout_hdr(lo); + /* Block new layoutgets and read/write to ds */ + lo->plh_block_lgets++; + spin_unlock(&inode->i_lock); + filemap_fdatawait(inode->i_mapping); + ret = pnfs_layoutcommit_inode(inode, true); + if (ret == 0) + ret = _pnfs_return_layout(inode); + spin_lock(&inode->i_lock); + lo->plh_block_lgets--; + spin_unlock(&inode->i_lock); + pnfs_put_layout_hdr(lo); + return ret; +} + bool pnfs_roc(struct inode *ino) { struct pnfs_layout_hdr *lo; @@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data) dprintk("pnfs write error = %d\n", hdr->pnfs_error); if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & PNFS_LAYOUTRET_ON_ERROR) { - clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); pnfs_return_layout(hdr->inode); } if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) @@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data) dprintk("pnfs read error = %d\n", hdr->pnfs_error); if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & PNFS_LAYOUTRET_ON_ERROR) { - clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); pnfs_return_layout(hdr->inode); } if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) @@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { if (lseg->pls_range.iomode == IOMODE_RW && - test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) + test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) list_add(&lseg->pls_lc_list, listp); } } +static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) +{ + struct pnfs_layout_segment *lseg, *tmp; + unsigned long *bitlock = &NFS_I(inode)->flags; + + /* Matched by references in pnfs_set_layoutcommit */ + list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { + list_del_init(&lseg->pls_lc_list); + 
pnfs_put_lseg(lseg); + } + + clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); + smp_mb__after_clear_bit(); + wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); +} + void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) { pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); @@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) if (nfss->pnfs_curr_ld->cleanup_layoutcommit) nfss->pnfs_curr_ld->cleanup_layoutcommit(data); + pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list); } /* diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 94ba80417748..f5f8a470a647 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata); void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); int pnfs_layoutcommit_inode(struct inode *inode, bool sync); int _pnfs_return_layout(struct inode *); +int pnfs_commit_and_return_layout(struct inode *); void pnfs_ld_write_done(struct nfs_write_data *); void pnfs_ld_read_done(struct nfs_read_data *); struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, @@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino) return 0; } +static inline int pnfs_commit_and_return_layout(struct inode *inode) +{ + return 0; +} + static inline bool pnfs_ld_layoutret_on_setattr(struct inode *inode) { diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 95cdcb208dfb..2f8a29db0f1b 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -335,6 +335,7 @@ struct file_system_type nfs4_fs_type = { .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA, }; MODULE_ALIAS_FS("nfs4"); +MODULE_ALIAS("nfs4"); EXPORT_SYMBOL_GPL(nfs4_fs_type); static int __init register_nfs4_fs(void) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 16d39c6c4fbb..2e27430b9070 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -230,37 +230,6 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) __nfs4_file_put_access(fp, oflag); } -static inline int get_new_stid(struct nfs4_stid *stid) -{ - static int min_stateid = 0; - struct idr *stateids = &stid->sc_client->cl_stateids; - int new_stid; - int error; - - error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); - /* - * Note: the necessary preallocation was done in - * nfs4_alloc_stateid(). The idr code caps the number of - * preallocations that can exist at a time, but the state lock - * prevents anyone from using ours before we get here: - */ - WARN_ON_ONCE(error); - /* - * It shouldn't be a problem to reuse an opaque stateid value. - * I don't think it is for 4.1. But with 4.0 I worry that, for - * example, a stray write retransmission could be accepted by - * the server when it should have been rejected. 
Therefore, - * adopt a trick from the sctp code to attempt to maximize the - * amount of time until an id is reused, by ensuring they always - * "increase" (mod INT_MAX): - */ - - min_stateid = new_stid+1; - if (min_stateid == INT_MAX) - min_stateid = 0; - return new_stid; -} - static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) { @@ -273,9 +242,8 @@ kmem_cache *slab) if (!stid) return NULL; - if (!idr_pre_get(stateids, GFP_KERNEL)) - goto out_free; - if (idr_get_new_above(stateids, stid, min_stateid, &new_id)) + new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL); + if (new_id < 0) goto out_free; stid->sc_client = cl; stid->sc_type = 0; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 01168865dd37..a2720071f282 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, iattr->ia_valid |= ATTR_SIZE; } if (bmval[0] & FATTR4_WORD0_ACL) { - int nace; + u32 nace; struct nfs4_ace *ace; READ_BUF(4); len += 4; diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 62c1ee128aeb..ca05f6dc3544 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp) { if (rp->c_type == RC_REPLBUFF) kfree(rp->c_replvec.iov_base); - hlist_del(&rp->c_hash); + if (!hlist_unhashed(&rp->c_hash)) + hlist_del(&rp->c_hash); list_del(&rp->c_lru); --num_drc_entries; kmem_cache_free(drc_slab, rp); @@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp) int nfsd_reply_cache_init(void) { + INIT_LIST_HEAD(&lru_head); + max_drc_entries = nfsd_cache_size_limit(); + num_drc_entries = 0; + register_shrinker(&nfsd_reply_cache_shrinker); drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), 0, 0, NULL); @@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void) if (!cache_hash) goto out_nomem; - INIT_LIST_HEAD(&lru_head); - max_drc_entries = nfsd_cache_size_limit(); - num_drc_entries = 0; - return 0; out_nomem: printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2a7eb536de0b..2b2e2396a869 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, int host_err; int stable = *stablep; int use_wgather; + loff_t pos = offset; dentry = file->f_path.dentry; inode = dentry->d_inode; @@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, /* Write the data. 
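The nfs4_alloc_stid() hunk above is part of the tree-wide switch from the two-step idr_pre_get()/idr_get_new_above() interface to idr_alloc(), which handles preallocation internally and reports failure through its return value. A kernel-context sketch of the new call, not standalone; stateids, stid and min_stateid are the caller's objects exactly as in the patch:

	/*
	 * int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
	 * returns the allocated id (>= start and < end, with end <= 0 meaning
	 * "no upper bound") or a negative errno.
	 */
	new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL);
	if (new_id < 0)      /* -ENOMEM, or -ENOSPC when the id space is exhausted */
		goto out_free;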
*/ oldfs = get_fs(); set_fs(KERNEL_DS); - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); + host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos); set_fs(oldfs); if (host_err < 0) goto out_nfserr; diff --git a/fs/pipe.c b/fs/pipe.c index 64a494cef0a0..2234f3f61f8d 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -863,6 +863,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) { int ret = -ENOENT; + if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE))) + return -EINVAL; + mutex_lock(&inode->i_mutex); if (inode->i_pipe) { diff --git a/fs/pnode.c b/fs/pnode.c index 3e000a51ac0d..8b29d2164da6 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -9,6 +9,7 @@ #include <linux/mnt_namespace.h> #include <linux/mount.h> #include <linux/fs.h> +#include <linux/nsproxy.h> #include "internal.h" #include "pnode.h" @@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest, int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, struct mount *source_mnt, struct list_head *tree_list) { + struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct mount *m, *child; int ret = 0; struct mount *prev_dest_mnt = dest_mnt; @@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); + /* Notice when we are propagating across user namespaces */ + if (m->mnt_ns->user_ns != user_ns) + type |= CL_UNPRIVILEGED; + child = copy_tree(source, source->mnt.mnt_root, type); if (IS_ERR(child)) { ret = PTR_ERR(child); diff --git a/fs/pnode.h b/fs/pnode.h index 19b853a3445c..a0493d5ebfbf 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -23,6 +23,7 @@ #define CL_MAKE_SHARED 0x08 #define CL_PRIVATE 0x10 #define CL_SHARED_TO_SLAVE 0x20 +#define CL_UNPRIVILEGED 0x40 static inline void set_mnt_shared(struct mount *mnt) { diff --git a/fs/proc/array.c b/fs/proc/array.c index f7ed9ee46eb9..cbd0f1b324b9 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -143,6 +143,7 @@ static const char * const task_state_array[] = { "x (dead)", /* 64 */ "K (wakekill)", /* 128 */ "W (waking)", /* 256 */ + "P (parked)", /* 512 */ }; static inline const char *get_task_state(struct task_struct *tsk) diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 4b3b3ffb52f1..21e1a8f1659d 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde) free_proc_entry(pde); } -/* - * Remove a /proc entry and free it if it's not currently in use. - */ -void remove_proc_entry(const char *name, struct proc_dir_entry *parent) +static void entry_rundown(struct proc_dir_entry *de) { - struct proc_dir_entry **p; - struct proc_dir_entry *de = NULL; - const char *fn = name; - unsigned int len; - - spin_lock(&proc_subdir_lock); - if (__xlate_proc_name(name, &parent, &fn) != 0) { - spin_unlock(&proc_subdir_lock); - return; - } - len = strlen(fn); - - for (p = &parent->subdir; *p; p=&(*p)->next ) { - if (proc_match(len, fn, *p)) { - de = *p; - *p = de->next; - de->next = NULL; - break; - } - } - spin_unlock(&proc_subdir_lock); - if (!de) { - WARN(1, "name '%s'\n", name); - return; - } - spin_lock(&de->pde_unload_lock); /* * Stop accepting new callers into module. If you're @@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) spin_lock(&de->pde_unload_lock); } spin_unlock(&de->pde_unload_lock); +} + +/* + * Remove a /proc entry and free it if it's not currently in use. 
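The remove_proc_subtree() added a little further below tears a directory tree down without recursion: it keeps detaching the first child and descending, and once a node has no children left it runs that entry down and climbs back to its parent to continue with the remaining siblings. A standalone toy with the same subdir/next/parent layout, so the loop's shape can be run and watched; every name here is invented:

#include <stdio.h>
#include <stdlib.h>

struct node {
	const char *name;
	struct node *subdir;    /* first child */
	struct node *next;      /* next sibling */
	struct node *parent;
};

static struct node *new_node(const char *name, struct node *parent)
{
	struct node *n = calloc(1, sizeof(*n));

	n->name = name;
	n->parent = parent;
	if (parent) {                    /* push onto the parent's child list */
		n->next = parent->subdir;
		parent->subdir = n;
	}
	return n;
}

static void remove_subtree(struct node *root)
{
	struct node *de = root, *next;

	while (1) {
		next = de->subdir;
		if (next) {                  /* descend into the first child */
			de->subdir = next->next;
			next->next = NULL;
			de = next;
			continue;
		}
		printf("tearing down %s\n", de->name);   /* entry_rundown() role */
		next = de->parent;
		if (de == root)
			break;
		free(de);
		de = next;                   /* climb back, retry remaining children */
	}
	free(root);
}

int main(void)
{
	struct node *root = new_node("root", NULL);
	struct node *a = new_node("a", root);

	new_node("b", root);
	new_node("a1", a);
	remove_subtree(root);                /* children always go before parents */
	return 0;
}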
+ */ +void remove_proc_entry(const char *name, struct proc_dir_entry *parent) +{ + struct proc_dir_entry **p; + struct proc_dir_entry *de = NULL; + const char *fn = name; + unsigned int len; + + spin_lock(&proc_subdir_lock); + if (__xlate_proc_name(name, &parent, &fn) != 0) { + spin_unlock(&proc_subdir_lock); + return; + } + len = strlen(fn); + + for (p = &parent->subdir; *p; p=&(*p)->next ) { + if (proc_match(len, fn, *p)) { + de = *p; + *p = de->next; + de->next = NULL; + break; + } + } + spin_unlock(&proc_subdir_lock); + if (!de) { + WARN(1, "name '%s'\n", name); + return; + } + + entry_rundown(de); if (S_ISDIR(de->mode)) parent->nlink--; @@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) pde_put(de); } EXPORT_SYMBOL(remove_proc_entry); + +int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) +{ + struct proc_dir_entry **p; + struct proc_dir_entry *root = NULL, *de, *next; + const char *fn = name; + unsigned int len; + + spin_lock(&proc_subdir_lock); + if (__xlate_proc_name(name, &parent, &fn) != 0) { + spin_unlock(&proc_subdir_lock); + return -ENOENT; + } + len = strlen(fn); + + for (p = &parent->subdir; *p; p=&(*p)->next ) { + if (proc_match(len, fn, *p)) { + root = *p; + *p = root->next; + root->next = NULL; + break; + } + } + if (!root) { + spin_unlock(&proc_subdir_lock); + return -ENOENT; + } + de = root; + while (1) { + next = de->subdir; + if (next) { + de->subdir = next->next; + next->next = NULL; + de = next; + continue; + } + spin_unlock(&proc_subdir_lock); + + entry_rundown(de); + next = de->parent; + if (S_ISDIR(de->mode)) + next->nlink--; + de->nlink = 0; + if (de == root) + break; + pde_put(de); + + spin_lock(&proc_subdir_lock); + de = next; + } + pde_put(root); + return 0; +} +EXPORT_SYMBOL(remove_proc_subtree); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index a86aebc9ba7c..869116c2afbe 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = { struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { - struct inode *inode = iget_locked(sb, de->low_ino); + struct inode *inode = new_inode_pseudo(sb); - if (inode && (inode->i_state & I_NEW)) { + if (inode) { + inode->i_ino = de->low_ino; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; PROC_I(inode)->pde = de; @@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) inode->i_fop = de->proc_fops; } } - unlock_new_inode(inode); } else pde_put(de); return inode; diff --git a/fs/proc/root.c b/fs/proc/root.c index c6e9fac26bac..9c7fab1d23f0 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -16,6 +16,7 @@ #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> +#include <linux/user_namespace.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include <linux/parser.h> @@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, } else { ns = task_active_pid_ns(current); options = data; + + if (!current_user_ns()->may_mount_proc) + return ERR_PTR(-EPERM); } sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 05ae3c97f7a5..3e64169ef527 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1439,8 +1439,11 @@ static void __dquot_initialize(struct inode *inode, int type) * did a write before quota was turned on */ rsv = inode_get_rsv_space(inode); - if (unlikely(rsv)) + if 
(unlikely(rsv)) { + spin_lock(&dq_data_lock); dquot_resv_space(inode->i_dquot[cnt], rsv); + spin_unlock(&dq_data_lock); + } } } out_err: diff --git a/fs/read_write.c b/fs/read_write.c index a698eff457fb..e6ddc8dceb96 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -17,6 +17,7 @@ #include <linux/splice.h> #include <linux/compat.h> #include "read_write.h" +#include "internal.h" #include <asm/uaccess.h> #include <asm/unistd.h> @@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof EXPORT_SYMBOL(do_sync_write); +ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos) +{ + mm_segment_t old_fs; + const char __user *p; + ssize_t ret; + + if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write)) + return -EINVAL; + + old_fs = get_fs(); + set_fs(get_ds()); + p = (__force const char __user *)buf; + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; + if (file->f_op->write) + ret = file->f_op->write(file, p, count, pos); + else + ret = do_sync_write(file, p, count, pos); + set_fs(old_fs); + if (ret > 0) { + fsnotify_modify(file); + add_wchar(current, ret); + } + inc_syscw(current); + return ret; +} + ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { ssize_t ret; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 194113b1b11b..f8a23c3078f8 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1147,8 +1147,7 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin "on filesystem root."); return 0; } - qf_names[qtype] = - kmalloc(strlen(arg) + 1, GFP_KERNEL); + qf_names[qtype] = kstrdup(arg, GFP_KERNEL); if (!qf_names[qtype]) { reiserfs_warning(s, "reiserfs-2502", "not enough memory " @@ -1156,7 +1155,6 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin "quotafile name."); return 0; } - strcpy(qf_names[qtype], arg); if (qtype == USRQUOTA) *mount_options |= 1 << REISERFS_USRQUOTA; else diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index c196369fe408..4cce1d9552fb 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) return -ENOSPC; - if (name[0] == '.' && (name[1] == '\0' || - (name[1] == '.' && name[2] == '\0'))) + if (name[0] == '.' && (namelen < 2 || + (namelen == 2 && name[1] == '.'))) return 0; dentry = lookup_one_len(name, dbuf->xadir, namelen); diff --git a/fs/splice.c b/fs/splice.c index 718bd0056384..29e394e49ddd 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -31,6 +31,7 @@ #include <linux/security.h> #include <linux/gfp.h> #include <linux/socket.h> +#include "internal.h" /* * Attempt to steal a page from a pipe buffer. 
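The fill_with_dentries() hunk above stops assuming a terminating NUL when skipping "." and "..": names reaching a filldir callback carry an explicit length and are not guaranteed to be NUL-terminated, which is presumably why the test is rewritten purely in terms of namelen. The same predicate as a small standalone function:

#include <stdio.h>

static int is_dot_or_dotdot(const char *name, int namelen)
{
	return name[0] == '.' &&
	       (namelen < 2 || (namelen == 2 && name[1] == '.'));
}

int main(void)
{
	/* ".", "..", ".hidden", "file" -> 1 1 0 0 */
	printf("%d %d %d %d\n",
	       is_dot_or_dotdot(".", 1),
	       is_dot_or_dotdot("..", 2),
	       is_dot_or_dotdot(".hidden", 7),
	       is_dot_or_dotdot("file", 4));
	return 0;
}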
This should perhaps go into @@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, { int ret; void *data; + loff_t tmp = sd->pos; data = buf->ops->map(pipe, buf, 0); - ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); + ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); buf->ops->unmap(pipe, buf, data); return ret; diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 260e3928d4f5..60553a9053ca 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -489,6 +489,7 @@ static struct file_system_type squashfs_fs_type = { .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV }; +MODULE_ALIAS_FS("squashfs"); static const struct super_operations squashfs_super_ops = { .alloc_inode = squashfs_alloc_inode, diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 2fbdff6be25c..e14512678c9b 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) ino = parent_sd->s_ino; if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; + else + return 0; } if (filp->f_pos == 1) { if (parent_sd->s_parent) @@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) ino = parent_sd->s_ino; if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; + else + return 0; } mutex_lock(&sysfs_mutex); for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); @@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) return 0; } +static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) +{ + struct inode *inode = file_inode(file); + loff_t ret; + + mutex_lock(&inode->i_mutex); + ret = generic_file_llseek(file, offset, whence); + mutex_unlock(&inode->i_mutex); + + return ret; +} const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, .readdir = sysfs_readdir, .release = sysfs_dir_release, - .llseek = generic_file_llseek, + .llseek = sysfs_dir_llseek, }; diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 8d924b5ec733..afd83273e6ce 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/magic.h> #include <linux/slab.h> +#include <linux/user_namespace.h> #include "sysfs.h" @@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, struct super_block *sb; int error; + if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs) + return ERR_PTR(-EPERM); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); diff --git a/fs/sysv/super.c b/fs/sysv/super.c index a39938b1feea..d0c6a007ce83 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c @@ -555,6 +555,7 @@ static struct file_system_type v7_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("v7"); +MODULE_ALIAS("v7"); static int __init init_sysv_fs(void) { diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ac838b844936..f21acf0ef01f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->remounting_rw = 1; c->ro_mount = 0; + if (c->space_fixup) { + err = ubifs_fixup_free_space(c); + if (err) + return err; + } + err = check_free_space(c); if (err) goto out; @@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) err = dbg_check_space_info(c); } - if (c->space_fixup) { - err = ubifs_fixup_free_space(c); - if 
(err) - goto out; - } - mutex_unlock(&c->umount_mutex); return err; diff --git a/fs/udf/super.c b/fs/udf/super.c index bc5b30a819e8..9ac4057a86c9 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -118,6 +118,7 @@ static struct file_system_type udf_fstype = { .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; +MODULE_ALIAS_FS("udf"); static struct kmem_cache *udf_inode_cachep; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 4e8f0df82d02..8459b5d8cb71 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1334,6 +1334,12 @@ _xfs_buf_ioapply( int size; int i; + /* + * Make sure we capture only current IO errors rather than stale errors + * left over from previous use of the buffer (e.g. failed readahead). + */ + bp->b_error = 0; + if (bp->b_flags & XBF_WRITE) { if (bp->b_flags & XBF_SYNCIO) rw = WRITE_SYNC; diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 912d83d8860a..5a30dd899d2b 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate( * rather than falling short due to things like stripe unit/width alignment of * real extents. */ -STATIC int +STATIC xfs_fsblock_t xfs_iomap_eof_prealloc_initial_size( struct xfs_mount *mp, struct xfs_inode *ip, @@ -413,7 +413,7 @@ xfs_iomap_prealloc_size( * have a large file on a small filesystem and the above * lowspace thresholds are smaller than MAXEXTLEN. */ - while (alloc_blocks >= freesp) + while (alloc_blocks && alloc_blocks >= freesp) alloc_blocks >>= 4; } diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 555d0337ad95..b327b5a9296d 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -235,6 +235,9 @@ extern void acpi_processor_unregister_performance(struct if a _PPC object exists, rmmod is disallowed then */ int acpi_processor_notify_smm(struct module *calling_module); +/* parsing the _P* objects. 
*/ +extern int acpi_processor_get_performance_info(struct acpi_processor *pr); + /* for communication between multiple parts of the processor kernel module */ DECLARE_PER_CPU(struct acpi_processor *, processors); extern struct acpi_processor_errata errata; diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 1ced6413ea03..33bd2de3bc1e 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -136,12 +136,6 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index 14883026015d..811fb1e9b061 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -92,6 +92,16 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) */ #include <asm-generic/cmpxchg-local.h> +#ifndef cmpxchg_local +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#endif + +#ifndef cmpxchg64_local +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + #define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 25f01d0bc149..b1b1fa6ffffe 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -99,7 +99,12 @@ struct mmu_gather { unsigned int need_flush : 1, /* Did free PTEs */ fast_mode : 1; /* No batching */ - unsigned int fullmm; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; struct mmu_gather_batch *active; struct mmu_gather_batch local; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index a386b0b654cc..918e8fe2f5e9 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -581,7 +581,11 @@ {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -592,6 +596,13 @@ {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ diff --git a/include/linux/ata.h b/include/linux/ata.h index 8f7a3d68371a..ee0bd9524055 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) } } -static inline bool atapi_command_packet_set(const u16 *dev_id) +static inline int atapi_command_packet_set(const u16 *dev_id) { return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0ea61e07a91c..7c2e030e72f1 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -12,7 +12,6 @@ struct blk_trace { int trace_state; - bool rq_based; struct rchan *rchan; unsigned long __percpu *sequence; unsigned char __percpu *msg_data; diff --git a/include/linux/capability.h b/include/linux/capability.h index 98503b792369..d9a4f7f40f32 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -35,6 +35,7 @@ struct cpu_vfs_cap_data { #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) +struct file; struct inode; struct dentry; struct user_namespace; @@ -211,6 +212,7 @@ extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool nsown_capable(int cap); extern bool inode_capable(const struct inode *inode, int cap); +extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/compat.h b/include/linux/compat.h index 76a87fb57ac2..377cd8c3395e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -141,11 +141,11 @@ typedef struct { } compat_sigset_t; struct compat_sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef __ARCH_HAS_IRIX_SIGACTION 
compat_uptr_t sa_handler; compat_ulong_t sa_flags; #else - compat_ulong_t sa_flags; + compat_uint_t sa_flags; compat_uptr_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index a975de1ff59f..3bd46f766751 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -51,7 +51,7 @@ struct task_struct; extern void debug_show_all_locks(void); extern void debug_show_held_locks(struct task_struct *task); extern void debug_check_no_locks_freed(const void *from, unsigned long len); -extern void debug_check_no_locks_held(void); +extern void debug_check_no_locks_held(struct task_struct *task); #else static inline void debug_show_all_locks(void) { @@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len) } static inline void -debug_check_no_locks_held(void) +debug_check_no_locks_held(struct task_struct *task) { } #endif diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e83ef39b3bea..fe8c4476f7e4 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data { #endif #else /* !CONFIG_PM_DEVFREQ */ -static struct devfreq *devfreq_add_device(struct device *dev, +static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) @@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev, return NULL; } -static int devfreq_remove_device(struct devfreq *devfreq) +static inline int devfreq_remove_device(struct devfreq *devfreq) { return 0; } -static int devfreq_suspend_device(struct devfreq *devfreq) +static inline int devfreq_suspend_device(struct devfreq *devfreq) { return 0; } -static int devfreq_resume_device(struct devfreq *devfreq) +static inline int devfreq_resume_device(struct devfreq *devfreq) { return 0; } -static struct opp *devfreq_recommended_opp(struct device *dev, +static inline struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { - return -EINVAL; + return ERR_PTR(-EINVAL); } -static int devfreq_register_opp_notifier(struct device *dev, +static inline int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { return -EINVAL; } -static int devfreq_unregister_opp_notifier(struct device *dev, +static inline int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { return -EINVAL; diff --git a/include/linux/edac.h b/include/linux/edac.h index 4fd4999ccb5b..0b763276f619 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -561,7 +561,6 @@ struct csrow_info { u32 ue_count; /* Uncorrectable Errors for this csrow */ u32 ce_count; /* Correctable Errors for this csrow */ - u32 nr_pages; /* combined pages count of all channels */ struct mem_ctl_info *mci; /* the parent */ @@ -676,11 +675,11 @@ struct mem_ctl_info { * sees memory sticks ("dimms"), and the ones that sees memory ranks. * All old memory controllers enumerate memories per rank, but most * of the recent drivers enumerate memories per DIMM, instead. - * When the memory controller is per rank, mem_is_per_rank is true. + * When the memory controller is per rank, csbased is true. */ unsigned n_layers; struct edac_mc_layer *layers; - bool mem_is_per_rank; + bool csbased; /* * DIMM info. 
Will eventually remove the entire csrows_info some day @@ -741,8 +740,6 @@ struct mem_ctl_info { u32 fake_inject_ue; u16 fake_inject_count; #endif - __u8 csbased : 1, /* csrow-based memory controller */ - __resv : 7; }; #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 9bf2f1fcae27..3d7df3d32c66 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, unsigned long count, u64 *max_size, int *reset_type); +typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); /* * EFI Configuration Table and GUID definitions @@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos #ifdef CONFIG_X86 extern void efi_late_init(void); extern void efi_free_boot_services(void); +extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); #else static inline void efi_late_init(void) {} static inline void efi_free_boot_services(void) {} + +static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) +{ + return EFI_SUCCESS; +} #endif extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); extern u64 efi_get_iobase (void); @@ -731,7 +738,7 @@ struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; - efi_query_variable_info_t *query_variable_info; + efi_query_variable_store_t *query_variable_store; }; struct efivars { diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 043a5cf8b5ba..e70df40d84f6 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -3,7 +3,6 @@ #ifndef FREEZER_H_INCLUDED #define FREEZER_H_INCLUDED -#include <linux/debug_locks.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/atomic.h> @@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void); static inline bool try_to_freeze(void) { - if (!(current->flags & PF_NOFREEZE)) - debug_check_no_locks_held(); might_sleep(); if (likely(!freezing(current))) return false; diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 729eded4b24f..2b93a9a5a1e6 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, spin_unlock(&fs->lock); } +extern bool current_chrooted(void); + #endif /* _LINUX_FS_STRUCT_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e5ca8ef50e9b..52da2a250795 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * that the call back has its own recursion protection. If it does * not set this, then the ftrace infrastructure will add recursion * protection for the caller. + * STUB - The ftrace_ops is just a place holder. 
*/ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -98,6 +99,7 @@ enum { FTRACE_OPS_FL_SAVE_REGS = 1 << 4, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, + FTRACE_OPS_FL_STUB = 1 << 7, }; struct ftrace_ops { @@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); -loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence); int ftrace_regex_release(struct inode *inode, struct file *file); void __init @@ -567,6 +568,8 @@ static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } #endif /* CONFIG_DYNAMIC_FTRACE */ +loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence); + /* totally disable ftrace - can not re-enable after this */ void ftrace_kill(void); diff --git a/include/linux/hash.h b/include/linux/hash.h index 61c97ae22e01..f09a0ae4d858 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -15,6 +15,7 @@ */ #include <asm/types.h> +#include <linux/compiler.h> /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ #define GOLDEN_RATIO_PRIME_32 0x9e370001UL @@ -31,7 +32,7 @@ #error Wordsize not 32 or 64 #endif -static inline u64 hash_64(u64 val, unsigned int bits) +static __always_inline u64 hash_64(u64 val, unsigned int bits) { u64 hash = val; diff --git a/include/linux/idr.h b/include/linux/idr.h index a6f38b5c34e4..2640c7e99e51 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -73,8 +73,6 @@ struct idr { */ void *idr_find_slowpath(struct idr *idp, int id); -int idr_pre_get(struct idr *idp, gfp_t gfp_mask); -int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); void idr_preload(gfp_t gfp_mask); int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); int idr_for_each(struct idr *idp, @@ -99,7 +97,7 @@ static inline void idr_preload_end(void) /** * idr_find - return pointer for given id - * @idp: idr handle + * @idr: idr handle * @id: lookup key * * Return the pointer given the id it has been registered with. A %NULL @@ -120,19 +118,6 @@ static inline void *idr_find(struct idr *idr, int id) } /** - * idr_get_new - allocate new idr entry - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: pointer to the allocated handle - * - * Simple wrapper around idr_get_new_above() w/ @starting_id of zero. - */ -static inline int idr_get_new(struct idr *idp, void *ptr, int *id) -{ - return idr_get_new_above(idp, ptr, 0, id); -} - -/** * idr_for_each_entry - iterate over an idr's elements of a given type * @idp: idr handle * @entry: the type * to use as cursor @@ -143,7 +128,56 @@ static inline int idr_get_new(struct idr *idp, void *ptr, int *id) entry != NULL; \ ++id, entry = (typeof(entry))idr_get_next((idp), &(id))) -void __idr_remove_all(struct idr *idp); /* don't use */ +/* + * Don't use the following functions. These exist only to suppress + * deprecated warnings on EXPORT_SYMBOL()s. + */ +int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); +int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); +void __idr_remove_all(struct idr *idp); + +/** + * idr_pre_get - reserve resources for idr allocation + * @idp: idr handle + * @gfp_mask: memory allocation flags + * + * Part of old alloc interface. This is going away. Use + * idr_preload[_end]() and idr_alloc() instead. 
+ */ +static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) +{ + return __idr_pre_get(idp, gfp_mask); +} + +/** + * idr_get_new_above - allocate new idr entry above or equal to a start id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @starting_id: id to start search at + * @id: pointer to the allocated handle + * + * Part of old alloc interface. This is going away. Use + * idr_preload[_end]() and idr_alloc() instead. + */ +static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, + int starting_id, int *id) +{ + return __idr_get_new_above(idp, ptr, starting_id, id); +} + +/** + * idr_get_new - allocate new idr entry + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: pointer to the allocated handle + * + * Part of old alloc interface. This is going away. Use + * idr_preload[_end]() and idr_alloc() instead. + */ +static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) +{ + return __idr_get_new_above(idp, ptr, 0, id); +} /** * idr_remove_all - remove all ids from the given idr tree diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 1f86a97ab2e2..8bd12be0b02f 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -227,14 +227,17 @@ struct st_sensor_data { }; #ifdef CONFIG_IIO_BUFFER +irqreturn_t st_sensors_trigger_handler(int irq, void *p); + +int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); +#endif + +#ifdef CONFIG_IIO_TRIGGER int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops); void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); -irqreturn_t st_sensors_trigger_handler(int irq, void *p); - -int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); #else static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index f5dbce50466e..66017028dcb3 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work); #ifdef CONFIG_IRQ_WORK bool irq_work_needs_cpu(void); #else -static bool irq_work_needs_cpu(void) { return false; } +static inline bool irq_work_needs_cpu(void) { return false; } #endif #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 80d36874689b..79fdd80a42d4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp); unsigned long int_sqrt(unsigned long); extern void bust_spinlocks(int yes); -extern void wake_up_klogd(void); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; extern int panic_on_oops; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d2e6927bbaae..d78d28a733b1 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size; int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); +int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long 
*crash_base); int crash_shrink_memory(unsigned long new_size); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cad77fe09d77..c13958251927 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa); + gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc7244cbd..b0bcce0ddc95 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,6 +71,7 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; + unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 91c9d109e5f1..eae7a053dc51 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -398,6 +398,7 @@ enum { ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ + ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/list.h b/include/linux/list.h index d991cc147c98..6a1f8df9144b 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -667,7 +667,9 @@ static inline void hlist_move_list(struct hlist_head *old, pos = n) #define hlist_entry_safe(ptr, type, member) \ - (ptr) ? hlist_entry(ptr, type, member) : NULL + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? 
hlist_entry(____ptr, type, member) : NULL; \ + }) /** * hlist_for_each_entry - iterate over list of given type diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 5b18ecde69b5..1aa4f13cdfa6 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -106,6 +106,29 @@ enum max77693_muic_reg { MAX77693_MUIC_REG_END, }; +/* MAX77693 INTMASK1~2 Register */ +#define INTMASK1_ADC1K_SHIFT 3 +#define INTMASK1_ADCERR_SHIFT 2 +#define INTMASK1_ADCLOW_SHIFT 1 +#define INTMASK1_ADC_SHIFT 0 +#define INTMASK1_ADC1K_MASK (1 << INTMASK1_ADC1K_SHIFT) +#define INTMASK1_ADCERR_MASK (1 << INTMASK1_ADCERR_SHIFT) +#define INTMASK1_ADCLOW_MASK (1 << INTMASK1_ADCLOW_SHIFT) +#define INTMASK1_ADC_MASK (1 << INTMASK1_ADC_SHIFT) + +#define INTMASK2_VIDRM_SHIFT 5 +#define INTMASK2_VBVOLT_SHIFT 4 +#define INTMASK2_DXOVP_SHIFT 3 +#define INTMASK2_DCDTMR_SHIFT 2 +#define INTMASK2_CHGDETRUN_SHIFT 1 +#define INTMASK2_CHGTYP_SHIFT 0 +#define INTMASK2_VIDRM_MASK (1 << INTMASK2_VIDRM_SHIFT) +#define INTMASK2_VBVOLT_MASK (1 << INTMASK2_VBVOLT_SHIFT) +#define INTMASK2_DXOVP_MASK (1 << INTMASK2_DXOVP_SHIFT) +#define INTMASK2_DCDTMR_MASK (1 << INTMASK2_DCDTMR_SHIFT) +#define INTMASK2_CHGDETRUN_MASK (1 << INTMASK2_CHGDETRUN_SHIFT) +#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) + /* MAX77693 MUIC - STATUS1~3 Register */ #define STATUS1_ADC_SHIFT (0) #define STATUS1_ADCLOW_SHIFT (5) diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index a4d13d7cd001..3bbda22721ea 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -221,6 +221,7 @@ struct palmas_clk_platform_data { }; struct palmas_platform_data { + int irq_flags; int gpio_base; /* bit value to be loaded to the POWER_CTRL register */ diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index aaceab402ec5..6d309032dc0d 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -323,5 +323,6 @@ int tps65912_device_init(struct tps65912 *tps65912); void tps65912_device_exit(struct tps65912 *tps65912); int tps65912_irq_init(struct tps65912 *tps65912, int irq, struct tps65912_platform_data *pdata); +int tps65912_irq_exit(struct tps65912 *tps65912); #endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h index b132067e9e99..867aa23f9370 100644 --- a/include/linux/mfd/wm831x/auxadc.h +++ b/include/linux/mfd/wm831x/auxadc.h @@ -15,6 +15,8 @@ #ifndef __MFD_WM831X_AUXADC_H__ #define __MFD_WM831X_AUXADC_H__ +struct wm831x; + /* * R16429 (0x402D) - AuxADC Data */ diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index 4a3b83a77614..76c22648436f 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -20,6 +20,7 @@ #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/regmap.h> +#include <linux/mfd/wm831x/auxadc.h> /* * Register values. @@ -355,7 +356,6 @@ enum wm831x_parent { }; struct wm831x; -enum wm831x_auxadc; typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x, enum wm831x_auxadc input); diff --git a/include/linux/mm.h b/include/linux/mm.h index 7acc9dc73c9f..e2091b88d24c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. 
*/
-#define VM_POPULATE 0x00001000
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -1612,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 61c7a87e5d2b..9aa863da287f 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
- ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
- (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
- VM_POPULATE : 0);
+ _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
}
#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ede274957e05..c74092eebf5c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
-static inline unsigned zone_end_pfn(const struct zone *zone)
+static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
}
diff --git a/include/linux/mount.h b/include/linux/mount.h
index d7029f4a191a..73005f9957ea 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,6 +47,8 @@ struct mnt_namespace;
#define MNT_INTERNAL 0x4000
+#define MNT_LOCK_READONLY 0x400000
+
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7ccb3c59ed60..ef52d9c91459 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -187,6 +187,13 @@ typedef enum {
* This happens with the Renesas AG-AND chips, possibly others.
*/
#define BBT_AUTO_REFRESH 0x00000080
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY 0x00000100
+
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 9121595a8ebf..433da8a1a426 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -53,6 +53,9 @@ struct mutex {
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
struct task_struct *owner;
#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ void *spin_mlock; /* Spinner MCS lock */
+#endif
#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
void *magic;
diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h
index f14943d55315..f80af8674342 100644
--- a/include/linux/mxsfb.h
+++ b/include/linux/mxsfb.h
@@ -24,8 +24,8 @@
#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
-#define FB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define FB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */
struct mxsfb_platform_data {
struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
* allocated. If specified,fb_size must also be specified.
* fb_phys must be unused by Linux.
*/
+ u32 sync; /* sync mask, contains MXSFB specifics not
+ * carried in fb_info->var.sync
+ */
};
#endif /* __LINUX_MXSFB_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b3d00fa4b314..6151e903eef0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -210,9 +210,9 @@ struct netdev_hw_addr {
#define NETDEV_HW_ADDR_T_SLAVE 3
#define NETDEV_HW_ADDR_T_UNICAST 4
#define NETDEV_HW_ADDR_T_MULTICAST 5
- bool synced;
bool global_use;
int refcount;
+ int synced;
struct rcu_head rcu_head;
};
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- * struct net_device *dev)
+ * struct net_device *dev, u32 filter_mask)
*
* int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
* Called to change device carrier.
Soft-devices (like dummy, team, etc) diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index 01d25e6fc792..0214c4c146fa 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set) #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) #define type_pf_data_next TOKEN(TYPE, PF, _data_next) #define type_pf_data_flags TOKEN(TYPE, PF, _data_flags) +#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags) #ifdef IP_SET_HASH_WITH_NETS #define type_pf_data_match TOKEN(TYPE, PF, _data_match) #else @@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried) struct ip_set_hash *h = set->data; struct htable *t, *orig = h->table; u8 htable_bits = orig->htable_bits; - const struct type_pf_elem *data; + struct type_pf_elem *data; struct hbucket *n, *m; - u32 i, j; + u32 i, j, flags = 0; int ret; retry: @@ -412,9 +413,16 @@ retry: n = hbucket(orig, i); for (j = 0; j < n->pos; j++) { data = ahash_data(n, j); +#ifdef IP_SET_HASH_WITH_NETS + flags = 0; + type_pf_data_reset_flags(data, &flags); +#endif m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0); + ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags); if (ret < 0) { +#ifdef IP_SET_HASH_WITH_NETS + type_pf_data_flags(data, flags); +#endif read_unlock_bh(&set->lock); ahash_destroy(t); if (ret == -EAGAIN) @@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried) struct ip_set_hash *h = set->data; struct htable *t, *orig = h->table; u8 htable_bits = orig->htable_bits; - const struct type_pf_elem *data; + struct type_pf_elem *data; struct hbucket *n, *m; - u32 i, j; + u32 i, j, flags = 0; int ret; /* Try to cleanup once */ @@ -873,10 +881,17 @@ retry: n = hbucket(orig, i); for (j = 0; j < n->pos; j++) { data = ahash_tdata(n, j); +#ifdef IP_SET_HASH_WITH_NETS + flags = 0; + type_pf_data_reset_flags(data, &flags); +#endif m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, - ip_set_timeout_get(type_pf_data_timeout(data))); + ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags, + ip_set_timeout_get(type_pf_data_timeout(data))); if (ret < 0) { +#ifdef IP_SET_HASH_WITH_NETS + type_pf_data_flags(data, flags); +#endif read_unlock_bh(&set->lock); ahash_destroy(t); if (ret == -EAGAIN) @@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set) #undef type_pf_data_tlist #undef type_pf_data_next #undef type_pf_data_flags +#undef type_pf_data_reset_flags #undef type_pf_data_match #undef type_pf_elem diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c25cccaa555a..4fa3b0b9b071 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -137,6 +137,34 @@ enum { NVME_LBAF_RP_DEGRADED = 3, }; +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 rsvd6[26]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 num_err_log_entries[16]; + __u8 rsvd192[320]; +}; + +enum { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_RELIABILITY = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, +}; + struct 
nvme_lba_range_type {
__u8 type;
__u8 attributes;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e19d8648e0e6..e73dfa308b87 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -934,6 +934,7 @@ void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
+void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e47ee462c2f2..1d795df6f4cf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -799,6 +799,12 @@ static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
#endif
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+extern void perf_restore_debug_store(void);
+#else
+static inline void perf_restore_debug_store(void) { }
+#endif
+
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578e..87a03c746f17 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */
-#define preempt_disable() do { } while (0)
-#define sched_preempt_enable_no_resched() do { } while (0)
-#define preempt_enable_no_resched() do { } while (0)
-#define preempt_enable() do { } while (0)
-
-#define preempt_disable_notrace() do { } while (0)
-#define preempt_enable_no_resched_notrace() do { } while (0)
-#define preempt_enable_notrace() do { } while (0)
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */ +#define preempt_disable() barrier() +#define sched_preempt_enable_no_resched() barrier() +#define preempt_enable_no_resched() barrier() +#define preempt_enable() barrier() + +#define preempt_disable_notrace() barrier() +#define preempt_enable_no_resched_notrace() barrier() +#define preempt_enable_notrace() barrier() #endif /* CONFIG_PREEMPT_COUNT */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 1249a54d17e0..822171fcb1c8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -134,6 +134,8 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; +extern void wake_up_klogd(void); + void log_buf_kexec_setup(void); void __init setup_log_buf(int early); #else @@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, return false; } +static inline void wake_up_klogd(void) +{ +} + static inline void log_buf_kexec_setup(void) { } diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 8307f2f94d86..94dfb2aa5533 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, const struct file_operations *proc_fops, void *data); extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); +extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent); struct pid_namespace; @@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name, return NULL; } #define remove_proc_entry(name, parent) do {} while (0) +#define remove_proc_subtree(name, parent) do {} while (0) static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) {return NULL;} diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 5ae8456d9670..c23099413ad6 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -14,6 +14,7 @@ */ #include <linux/cgroup.h> +#include <linux/errno.h> /* * The core object. the cgroup that wishes to account for some diff --git a/include/linux/sched.h b/include/linux/sched.h index d35d2b6ddbfb..2d02c76a01be 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #define TASK_DEAD 64 #define TASK_WAKEKILL 128 #define TASK_WAKING 256 -#define TASK_STATE_MAX 512 +#define TASK_PARKED 512 +#define TASK_STATE_MAX 1024 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -320,7 +321,6 @@ extern signed long schedule_timeout_killable(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); -extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); struct nsproxy; struct user_namespace; diff --git a/include/linux/security.h b/include/linux/security.h index eee7478cda70..032c366ef1c6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * This hook can be used by the module to update any security state * associated with the TUN device's security structure. * @security pointer to the TUN devices's security structure. 
+ * @skb_owned_by: + * This hook sets the packet's owning sock. + * @skb is the packet. + * @sk the sock which owns the packet. * * Security hooks for XFRM operations. * @@ -1638,6 +1642,7 @@ struct security_operations { int (*tun_dev_attach_queue) (void *security); int (*tun_dev_attach) (struct sock *sk, void *security); int (*tun_dev_open) (void *security); + void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); +void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); + #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, struct sock *other, @@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security) { return 0; } + +static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/include/linux/signal.h b/include/linux/signal.h index a2dcb94ea49d..9475c5cb28bc 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -250,11 +250,11 @@ extern int show_unhandled_signals; extern int sigsuspend(sigset_t *); struct sigaction { -#ifndef __ARCH_HAS_ODD_SIGACTION +#ifndef __ARCH_HAS_IRIX_SIGACTION __sighandler_t sa_handler; unsigned long sa_flags; #else - unsigned long sa_flags; + unsigned int sa_flags; __sighandler_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 821c7f45d2a7..b8292d8cc9fa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -500,7 +500,7 @@ struct sk_buff { union { __u32 mark; __u32 dropcount; - __u32 avail_size; + __u32 reserved_tailroom; }; sk_buff_data_t inner_transport_header; @@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * do not lose pfmemalloc information as the pages would not be * allocated using __GFP_MEMALLOC. */ - if (page->pfmemalloc && !page->mapping) - skb->pfmemalloc = true; frag->page.p = page; frag->page_offset = off; skb_frag_size_set(frag, size); + + page = compound_head(page); + if (page->pfmemalloc && !page->mapping) + skb->pfmemalloc = true; } /** @@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const struct sk_buff *skb) */ static inline int skb_availroom(const struct sk_buff *skb) { - return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; + if (skb_is_nonlinear(skb)) + return 0; + + return skb->end - skb->tail - skb->reserved_tailroom; } /** @@ -2638,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb) #endif } +static inline void nf_reset_trace(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) + skb->nf_trace = 0; +#endif +} + /* Note: This doesn't put any conntrack and bridge info in dst. */ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) { diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index a26e2fb604e6..e2369c167dbd 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -16,7 +16,10 @@ * In the debug case, 1 means unlocked, 0 means locked. (the values * are inverted, to catch initialization bugs) * - * No atomicity anywhere, we are on UP. + * No atomicity anywhere, we are on UP. 
However, we still need + * the compiler barriers, because we do not want the compiler to + * move potentially faulting instructions (notably user accesses) + * into the locked sequence, resulting in non-atomic execution. */ #ifdef CONFIG_DEBUG_SPINLOCK @@ -25,6 +28,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; + barrier(); } static inline void @@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; + barrier(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) char oldval = lock->slock; lock->slock = 0; + barrier(); return oldval > 0; } static inline void arch_spin_unlock(arch_spinlock_t *lock) { + barrier(); lock->slock = 1; } /* * Read-write spinlocks. No debug version. */ -#define arch_read_lock(lock) do { (void)(lock); } while (0) -#define arch_write_lock(lock) do { (void)(lock); } while (0) -#define arch_read_trylock(lock) ({ (void)(lock); 1; }) -#define arch_write_trylock(lock) ({ (void)(lock); 1; }) -#define arch_read_unlock(lock) do { (void)(lock); } while (0) -#define arch_write_unlock(lock) do { (void)(lock); } while (0) +#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define arch_spin_lock(lock) do { (void)(lock); } while (0) -# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) -# define arch_spin_unlock(lock) do { (void)(lock); } while (0) -# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) +# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ #define arch_spin_is_contended(lock) (((void)(lock), 0)) diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 9e492be5244b..6fcfe99bd999 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -219,6 +219,7 @@ #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ @@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id { void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, enum ssb_pmu_ldo_volt_id id, u32 voltage); void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid); #endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 2de42f9401d2..a5ffd32642fd 100644 --- a/include/linux/swiotlb.h +++ 
b/include/linux/swiotlb.h @@ -25,6 +25,7 @@ extern int swiotlb_force; extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); extern unsigned long swiotlb_nr_tbl(void); +unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); /* diff --git a/include/linux/thermal.h b/include/linux/thermal.h index f0bd7f90a90d..e3c0ae9bb1fa 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -44,7 +44,7 @@ /* Adding event notification support elements */ #define THERMAL_GENL_FAMILY_NAME "thermal_event" #define THERMAL_GENL_VERSION 0x01 -#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h new file mode 100644 index 000000000000..cbb20afdbc01 --- /dev/null +++ b/include/linux/ucs2_string.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_UCS2_STRING_H_ +#define _LINUX_UCS2_STRING_H_ + +#include <linux/types.h> /* for size_t */ +#include <linux/stddef.h> /* for NULL */ + +typedef u16 ucs2_char_t; + +unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); +unsigned long ucs2_strlen(const ucs2_char_t *s); +unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); +int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); + +#endif /* _LINUX_UCS2_STRING_H_ */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 9d81de123c90..42278bbf7a88 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -68,6 +68,7 @@ struct udp_sock { * For encapsulation sockets. */ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + void (*encap_destroy)(struct sock *sk); }; static inline struct udp_sock *udp_sk(const struct sock *sk) diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3b8f9d4fc3fe..cc25b70af33c 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -127,6 +127,7 @@ struct cdc_ncm_ctx { u16 connected; }; +extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 3c671c1b37f6..8860594d6364 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -60,7 +60,7 @@ struct usb_configuration; * @name: For diagnostics, identifies the function. * @strings: tables of strings, keyed by identifiers assigned during bind() * and by language IDs provided in control requests - * @descriptors: Table of full (or low) speed descriptors, using interface and + * @fs_descriptors: Table of full (or low) speed descriptors, using interface and * string identifiers assigned during @bind(). If this pointer is null, * the function will not be available at full speed (or at low speed). 
* @hs_descriptors: Table of high speed descriptors, using interface and @@ -290,6 +290,7 @@ enum { * after function notifications * @resume: Notifies configuration when the host restarts USB traffic, * before function notifications + * @gadget_driver: Gadget driver controlling this driver * * Devices default to reporting self powered operation. Devices which rely * on bus powered operation should report this in their @bind method. diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 0a78df5f6cfd..59694b5e5e90 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -357,6 +357,7 @@ struct hc_driver { */ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); + int (*find_raw_port_number)(struct usb_hcd *, int); }; extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); @@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); +extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index ef9be7e1e190..1819b59aab2a 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -66,6 +66,7 @@ * port. * @flags: usb serial port flags * @write_wait: a wait_queue_head_t used by the port. + * @delta_msr_wait: modem-status-change wait queue * @work: work queue entry for the line discipline waking up. * @throttled: nonzero if the read urb is inactive to throttle the device * @throttle_req: nonzero if the tty wants to throttle us @@ -112,6 +113,7 @@ struct usb_serial_port { unsigned long flags; wait_queue_head_t write_wait; + wait_queue_head_t delta_msr_wait; struct work_struct work; char throttled; char throttle_req; diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 6f033a415ecb..5c295c26ad37 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h @@ -181,8 +181,16 @@ /*-------------------------------------------------------------------------*/ +#if IS_ENABLED(CONFIG_USB_ULPI) struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags); +#else +static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, + unsigned int flags) +{ + return NULL; +} +#endif #ifdef CONFIG_USB_ULPI_VIEWPORT /* access ops for controllers with a viewport register */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 4ce009324933..b6b215f13b45 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -26,6 +26,8 @@ struct user_namespace { kuid_t owner; kgid_t group; unsigned int proc_inum; + bool may_mount_sysfs; + bool may_mount_proc; }; extern struct user_namespace init_user_ns; @@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns) #endif +void update_mnt_policy(struct user_namespace *userns); + #endif /* _LINUX_USER_H */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 40be2a0d8ae1..84a6440f1f19 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, /* Device notifier */ extern int register_inet6addr_notifier(struct notifier_block *nb); extern int unregister_inet6addr_notifier(struct notifier_block *nb); +extern int 
inet6addr_notifier_call_chain(unsigned long val, void *v); extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, struct ipv6_devconf *devconf); diff --git a/include/net/dst.h b/include/net/dst.h index 853cda11e518..1f8fd109e225 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -413,13 +413,15 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) { - return dst->ops->neigh_lookup(dst, NULL, daddr); + struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); + return IS_ERR(n) ? NULL : n; } static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - return dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? NULL : n; } static inline void dst_link_failure(struct sk_buff *skb) diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 80461c1ae9ef..bb8271d487b7 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -9,6 +9,7 @@ struct flow_keys { __be32 ports; __be16 port16[2]; }; + u16 thoff; u8 ip_proto; }; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 76c3fe5ecc2e..0a1dcc2fa2f5 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -43,6 +43,13 @@ struct inet_frag_queue { #define INETFRAGS_HASHSZ 64 +/* averaged: + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or + * struct frag_queue)) + */ +#define INETFRAGS_MAXDEPTH 128 + struct inet_frags { struct hlist_head hash[INETFRAGS_HASHSZ]; /* This rwlock is a global lock (seperate per IPv4, IPv6 and @@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force); struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frags *f, void *key, unsigned int hash) __releases(&f->lock); +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, + const char *prefix); static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) { diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9497be1ad4c0..e49db91593a9 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -152,18 +152,16 @@ struct fib_result_nl { }; #ifdef CONFIG_IP_ROUTE_MULTIPATH - #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) - -#define FIB_TABLE_HASHSZ 2 - #else /* CONFIG_IP_ROUTE_MULTIPATH */ - #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +#ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 - -#endif /* CONFIG_IP_ROUTE_MULTIPATH */ +#else +#define FIB_TABLE_HASHSZ 2 +#endif extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 68c69d54d392..fce8e6b66d55 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -976,6 +976,7 @@ struct netns_ipvs { int sysctl_sync_retries; int sysctl_nat_icmp_send; int sysctl_pmtu_disc; + int sysctl_backup_only; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) return ipvs->sysctl_pmtu_disc; } +static inline int sysctl_backup_only(struct netns_ipvs *ipvs) +{ + return ipvs->sync_state & IP_VS_STATE_BACKUP && + ipvs->sysctl_backup_only; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -1114,6 +1121,11 @@ 
static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_backup_only(struct netns_ipvs *ipvs) +{ + return 0; +} + #endif /* diff --git a/include/net/ipip.h b/include/net/ipip.h index fd19625ff99d..982141c15200 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); - if (iph->frag_off & htons(IP_DF)) - iph->id = 0; - else { - /* Use inner packet iph-id if possible. */ - if (skb->protocol == htons(ETH_P_IP) && old_iph->id) - iph->id = old_iph->id; - else - __ip_select_ident(iph, dst, - (skb_shinfo(skb)->gso_segs ?: 1) - 1); - } + /* Use inner packet iph-id if possible. */ + if (skb->protocol == htons(ETH_P_IP) && old_iph->id) + iph->id = old_iph->id; + else + __ip_select_ident(iph, dst, + (skb_shinfo(skb)->gso_segs ?: 1) - 1); } #endif diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index f74109144d3f..f132924cc9da 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h @@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self) return (self && self->lap) ? self->lap->daddr : 0; } -extern const char *irlmp_reasons[]; +const char *irlmp_reason_str(LM_REASON reason); + extern int sysctl_discovery_timeout; extern int sysctl_discovery_slots; extern int sysctl_discovery; diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index cc7c19732389..714cc9a54a4c 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -130,6 +130,14 @@ struct iucv_sock { enum iucv_tx_notify n); }; +struct iucv_skb_cb { + u32 class; /* target class of message */ + u32 tag; /* tag associated with message */ + u32 offset; /* offset for skb receival */ +}; + +#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0])) + /* iucv socket options (SOL_IUCV) */ #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ diff --git a/include/net/scm.h b/include/net/scm.h index 975cca01048b..b11708105681 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm, scm->pid = get_pid(pid); scm->cred = cred ? get_cred(cred) : NULL; scm->creds.pid = pid_vnr(pid); - scm->creds.uid = cred ? cred->euid : INVALID_UID; - scm->creds.gid = cred ? cred->egid : INVALID_GID; + scm->creds.uid = cred ? cred->uid : INVALID_UID; + scm->creds.gid = cred ? 
cred->gid : INVALID_GID; } static __inline__ void scm_destroy_cred(struct scm_cookie *scm) diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 399162b50a8d..e1379b4e8faf 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *); /* * DISCOVERY LAYER *****************************/ -int fc_disc_init(struct fc_lport *); +void fc_disc_init(struct fc_lport *); +void fc_disc_config(struct fc_lport *, void *); static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) { diff --git a/include/sound/max98090.h b/include/sound/max98090.h index 95efb13f8478..95efb13f8478 100755..100644 --- a/include/sound/max98090.h +++ b/include/sound/max98090.h diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index e1ef63d4a5c4..44a30b108683 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -488,6 +488,7 @@ struct snd_soc_dapm_path { /* status */ u32 connect:1; /* source and sink widgets are connected */ u32 walked:1; /* path has been walked */ + u32 walking:1; /* path is in the process of being walked */ u32 weak:1; /* path ignored for power management */ int (*connected)(struct snd_soc_dapm_widget *source, diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 9961726523d0..9c1467357b03 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce, /** * block_bio_complete - completed all work on the block operation + * @q: queue holding the block operation * @bio: block operation completed * @error: io error value * @@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce, */ TRACE_EVENT(block_bio_complete, - TP_PROTO(struct bio *bio, int error), + TP_PROTO(struct request_queue *q, struct bio *bio, int error), - TP_ARGS(bio, error), + TP_ARGS(q, bio, error), TP_STRUCT__entry( __field( dev_t, dev ) @@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete, ), TP_fast_assign( - __entry->dev = bio->bi_bdev ? - bio->bi_bdev->bd_dev : 0; + __entry->dev = bio->bi_bdev->bd_dev; __entry->sector = bio->bi_sector; __entry->nr_sector = bio->bi_size >> 9; __entry->error = error; diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5a8671e8a67f..e5586caff67a 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch, __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, { 16, "Z" }, { 32, "X" }, { 64, "x" }, - { 128, "W" }) : "R", + { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", __entry->prev_state & TASK_STATE_MAX ? "+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h index 11b6ca3e0873..df2f9a0bba6a 100644 --- a/include/uapi/linux/acct.h +++ b/include/uapi/linux/acct.h @@ -107,10 +107,12 @@ struct acct_v3 #define ACORE 0x08 /* ... dumped core */ #define AXSIG 0x10 /* ... was killed by a signal */ -#ifdef __BIG_ENDIAN +#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) #define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ -#else +#elif defined(__BYTE_ORDER) ? 
__BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) #define ACCT_BYTEORDER 0x00 /* accounting file is little endian */ +#else +#error unspecified endianness #endif #ifndef __KERNEL__ diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index 86fa7a71336a..bb2554f7fbd1 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h @@ -62,9 +62,9 @@ struct io_event { __s64 res2; /* secondary result */ }; -#if defined(__LITTLE_ENDIAN) +#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) #define PADDED(x,y) x, y -#elif defined(__BIG_ENDIAN) +#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) #define PADDED(x,y) y, x #else #error edit for your odd byteorder. diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 4c43b4448792..706d035fa748 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -95,15 +95,10 @@ #ifndef _LINUX_FUSE_H #define _LINUX_FUSE_H -#ifdef __linux__ +#ifdef __KERNEL__ #include <linux/types.h> #else #include <stdint.h> -#define __u64 uint64_t -#define __s64 int64_t -#define __u32 uint32_t -#define __s32 int32_t -#define __u16 uint16_t #endif /* @@ -139,42 +134,42 @@ userspace works under 64bit kernels */ struct fuse_attr { - __u64 ino; - __u64 size; - __u64 blocks; - __u64 atime; - __u64 mtime; - __u64 ctime; - __u32 atimensec; - __u32 mtimensec; - __u32 ctimensec; - __u32 mode; - __u32 nlink; - __u32 uid; - __u32 gid; - __u32 rdev; - __u32 blksize; - __u32 padding; + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t padding; }; struct fuse_kstatfs { - __u64 blocks; - __u64 bfree; - __u64 bavail; - __u64 files; - __u64 ffree; - __u32 bsize; - __u32 namelen; - __u32 frsize; - __u32 padding; - __u32 spare[6]; + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; }; struct fuse_file_lock { - __u64 start; - __u64 end; - __u32 type; - __u32 pid; /* tgid */ + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; /* tgid */ }; /** @@ -364,143 +359,143 @@ enum fuse_notify_code { #define FUSE_COMPAT_ENTRY_OUT_SIZE 120 struct fuse_entry_out { - __u64 nodeid; /* Inode ID */ - __u64 generation; /* Inode generation: nodeid:gen must - be unique for the fs's lifetime */ - __u64 entry_valid; /* Cache timeout for the name */ - __u64 attr_valid; /* Cache timeout for the attributes */ - __u32 entry_valid_nsec; - __u32 attr_valid_nsec; + uint64_t nodeid; /* Inode ID */ + uint64_t generation; /* Inode generation: nodeid:gen must + be unique for the fs's lifetime */ + uint64_t entry_valid; /* Cache timeout for the name */ + uint64_t attr_valid; /* Cache timeout for the attributes */ + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; struct fuse_attr attr; }; struct fuse_forget_in { - __u64 nlookup; + uint64_t nlookup; }; struct fuse_forget_one { - __u64 nodeid; - __u64 nlookup; + uint64_t nodeid; + uint64_t nlookup; }; struct fuse_batch_forget_in { - __u32 count; - __u32 dummy; + uint32_t count; + uint32_t dummy; }; struct fuse_getattr_in { - __u32 getattr_flags; - __u32 dummy; - __u64 fh; + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; }; #define 
FUSE_COMPAT_ATTR_OUT_SIZE 96 struct fuse_attr_out { - __u64 attr_valid; /* Cache timeout for the attributes */ - __u32 attr_valid_nsec; - __u32 dummy; + uint64_t attr_valid; /* Cache timeout for the attributes */ + uint32_t attr_valid_nsec; + uint32_t dummy; struct fuse_attr attr; }; #define FUSE_COMPAT_MKNOD_IN_SIZE 8 struct fuse_mknod_in { - __u32 mode; - __u32 rdev; - __u32 umask; - __u32 padding; + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; }; struct fuse_mkdir_in { - __u32 mode; - __u32 umask; + uint32_t mode; + uint32_t umask; }; struct fuse_rename_in { - __u64 newdir; + uint64_t newdir; }; struct fuse_link_in { - __u64 oldnodeid; + uint64_t oldnodeid; }; struct fuse_setattr_in { - __u32 valid; - __u32 padding; - __u64 fh; - __u64 size; - __u64 lock_owner; - __u64 atime; - __u64 mtime; - __u64 unused2; - __u32 atimensec; - __u32 mtimensec; - __u32 unused3; - __u32 mode; - __u32 unused4; - __u32 uid; - __u32 gid; - __u32 unused5; + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t unused2; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t unused3; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; }; struct fuse_open_in { - __u32 flags; - __u32 unused; + uint32_t flags; + uint32_t unused; }; struct fuse_create_in { - __u32 flags; - __u32 mode; - __u32 umask; - __u32 padding; + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t padding; }; struct fuse_open_out { - __u64 fh; - __u32 open_flags; - __u32 padding; + uint64_t fh; + uint32_t open_flags; + uint32_t padding; }; struct fuse_release_in { - __u64 fh; - __u32 flags; - __u32 release_flags; - __u64 lock_owner; + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; }; struct fuse_flush_in { - __u64 fh; - __u32 unused; - __u32 padding; - __u64 lock_owner; + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; }; struct fuse_read_in { - __u64 fh; - __u64 offset; - __u32 size; - __u32 read_flags; - __u64 lock_owner; - __u32 flags; - __u32 padding; + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; }; #define FUSE_COMPAT_WRITE_IN_SIZE 24 struct fuse_write_in { - __u64 fh; - __u64 offset; - __u32 size; - __u32 write_flags; - __u64 lock_owner; - __u32 flags; - __u32 padding; + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; }; struct fuse_write_out { - __u32 size; - __u32 padding; + uint32_t size; + uint32_t padding; }; #define FUSE_COMPAT_STATFS_SIZE 48 @@ -510,32 +505,32 @@ struct fuse_statfs_out { }; struct fuse_fsync_in { - __u64 fh; - __u32 fsync_flags; - __u32 padding; + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; }; struct fuse_setxattr_in { - __u32 size; - __u32 flags; + uint32_t size; + uint32_t flags; }; struct fuse_getxattr_in { - __u32 size; - __u32 padding; + uint32_t size; + uint32_t padding; }; struct fuse_getxattr_out { - __u32 size; - __u32 padding; + uint32_t size; + uint32_t padding; }; struct fuse_lk_in { - __u64 fh; - __u64 owner; + uint64_t fh; + uint64_t owner; struct fuse_file_lock lk; - __u32 lk_flags; - __u32 padding; + uint32_t lk_flags; + uint32_t padding; }; struct fuse_lk_out { @@ -543,134 +538,135 @@ struct fuse_lk_out { }; struct fuse_access_in { - __u32 mask; - __u32 padding; + uint32_t mask; + uint32_t padding; 
}; struct fuse_init_in { - __u32 major; - __u32 minor; - __u32 max_readahead; - __u32 flags; + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; }; struct fuse_init_out { - __u32 major; - __u32 minor; - __u32 max_readahead; - __u32 flags; - __u16 max_background; - __u16 congestion_threshold; - __u32 max_write; + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; }; #define CUSE_INIT_INFO_MAX 4096 struct cuse_init_in { - __u32 major; - __u32 minor; - __u32 unused; - __u32 flags; + uint32_t major; + uint32_t minor; + uint32_t unused; + uint32_t flags; }; struct cuse_init_out { - __u32 major; - __u32 minor; - __u32 unused; - __u32 flags; - __u32 max_read; - __u32 max_write; - __u32 dev_major; /* chardev major */ - __u32 dev_minor; /* chardev minor */ - __u32 spare[10]; + uint32_t major; + uint32_t minor; + uint32_t unused; + uint32_t flags; + uint32_t max_read; + uint32_t max_write; + uint32_t dev_major; /* chardev major */ + uint32_t dev_minor; /* chardev minor */ + uint32_t spare[10]; }; struct fuse_interrupt_in { - __u64 unique; + uint64_t unique; }; struct fuse_bmap_in { - __u64 block; - __u32 blocksize; - __u32 padding; + uint64_t block; + uint32_t blocksize; + uint32_t padding; }; struct fuse_bmap_out { - __u64 block; + uint64_t block; }; struct fuse_ioctl_in { - __u64 fh; - __u32 flags; - __u32 cmd; - __u64 arg; - __u32 in_size; - __u32 out_size; + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; }; struct fuse_ioctl_iovec { - __u64 base; - __u64 len; + uint64_t base; + uint64_t len; }; struct fuse_ioctl_out { - __s32 result; - __u32 flags; - __u32 in_iovs; - __u32 out_iovs; + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; }; struct fuse_poll_in { - __u64 fh; - __u64 kh; - __u32 flags; - __u32 events; + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; }; struct fuse_poll_out { - __u32 revents; - __u32 padding; + uint32_t revents; + uint32_t padding; }; struct fuse_notify_poll_wakeup_out { - __u64 kh; + uint64_t kh; }; struct fuse_fallocate_in { - __u64 fh; - __u64 offset; - __u64 length; - __u32 mode; - __u32 padding; + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; }; struct fuse_in_header { - __u32 len; - __u32 opcode; - __u64 unique; - __u64 nodeid; - __u32 uid; - __u32 gid; - __u32 pid; - __u32 padding; + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint32_t padding; }; struct fuse_out_header { - __u32 len; - __s32 error; - __u64 unique; + uint32_t len; + int32_t error; + uint64_t unique; }; struct fuse_dirent { - __u64 ino; - __u64 off; - __u32 namelen; - __u32 type; + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; char name[]; }; #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) -#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) +#define FUSE_DIRENT_ALIGN(x) \ + (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1)) #define FUSE_DIRENT_SIZE(d) \ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) @@ -685,47 +681,47 @@ struct fuse_direntplus { FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen) struct fuse_notify_inval_inode_out { - __u64 ino; - __s64 off; - __s64 len; + uint64_t ino; + int64_t off; + int64_t len; }; struct fuse_notify_inval_entry_out { - 
__u64 parent; - __u32 namelen; - __u32 padding; + uint64_t parent; + uint32_t namelen; + uint32_t padding; }; struct fuse_notify_delete_out { - __u64 parent; - __u64 child; - __u32 namelen; - __u32 padding; + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; }; struct fuse_notify_store_out { - __u64 nodeid; - __u64 offset; - __u32 size; - __u32 padding; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; }; struct fuse_notify_retrieve_out { - __u64 notify_unique; - __u64 nodeid; - __u64 offset; - __u32 size; - __u32 padding; + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; }; /* Matches the size of fuse_write_in */ struct fuse_notify_retrieve_in { - __u64 dummy1; - __u64 offset; - __u32 size; - __u32 dummy2; - __u64 dummy3; - __u64 dummy4; + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; }; #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index 93f5fa94a431..afafd703ad92 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -33,9 +33,11 @@ enum { PACKET_DIAG_TX_RING, PACKET_DIAG_FANOUT, - PACKET_DIAG_MAX, + __PACKET_DIAG_MAX, }; +#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1) + struct packet_diag_info { __u32 pdi_index; __u32 pdi_version; diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index ee753536ab70..fe1a5406d4d9 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -145,16 +145,18 @@ typedef struct mdp_superblock_s { __u32 failed_disks; /* 4 Number of failed disks */ __u32 spare_disks; /* 5 Number of spare disks */ __u32 sb_csum; /* 6 checksum of the whole superblock */ -#ifdef __BIG_ENDIAN +#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) __u32 events_hi; /* 7 high-order of superblock update count */ __u32 events_lo; /* 8 low-order of superblock update count */ __u32 cp_events_hi; /* 9 high-order of checkpoint update count */ __u32 cp_events_lo; /* 10 low-order of checkpoint update count */ -#else +#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) __u32 events_lo; /* 7 low-order of superblock update count */ __u32 events_hi; /* 8 high-order of superblock update count */ __u32 cp_events_lo; /* 9 low-order of checkpoint update count */ __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ +#else +#error unspecified endianness #endif __u32 recovery_cp; /* 11 recovery checkpoint sector count */ /* There are only valid for minor_version > 90 */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index b6a23a483d74..74c2bf7211f8 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -51,7 +51,10 @@ #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ #define PORT_BRCM_TRUMANAGE 25 -#define PORT_MAX_8250 25 /* max port ID */ +#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */ +#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */ +#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */ +#define PORT_MAX_8250 28 /* max port ID */ /* * ARM specific type numbers. 
These are not currently guaranteed diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b8a24941db21..b9e2a6a7446f 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h @@ -39,9 +39,11 @@ enum { UNIX_DIAG_MEMINFO, UNIX_DIAG_SHUTDOWN, - UNIX_DIAG_MAX, + __UNIX_DIAG_MAX, }; +#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1) + struct unix_diag_vfs { __u32 udiag_vfs_ino; __u32 udiag_vfs_dev; diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 28447f1594fa..8deb22672ada 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h @@ -30,7 +30,6 @@ */ #define ATMEL_LCDC_WIRING_BGR 0 #define ATMEL_LCDC_WIRING_RGB 1 -#define ATMEL_LCDC_WIRING_RGB555 2 /* LCD Controller info data structure, stored in device platform_data */ @@ -62,6 +61,7 @@ struct atmel_lcdfb_info { void (*atmel_lcdfb_power_control)(int on); struct fb_monspecs *default_monspecs; u32 pseudo_palette[16]; + bool have_intensity_bit; }; #define ATMEL_LCDC_DMABADDR1 0x00 diff --git a/include/xen/events.h b/include/xen/events.h index c6bfe01acf6b..b2b27c6a0f7b 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -90,8 +90,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); /* Bind an PSI pirq to an irq. */ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, - int pirq, int vector, const char *name, - domid_t domid); + int pirq, const char *name, domid_t domid); #endif /* De-allocates the above mentioned physical interrupt. */ diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 01c3d62436ef..ffd4652de91c 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -138,11 +138,21 @@ struct blkif_request_discard { uint8_t _pad3; } __attribute__((__packed__)); +struct blkif_request_other { + uint8_t _pad1; + blkif_vdev_t _pad2; /* only for read/write requests */ +#ifdef CONFIG_X86_64 + uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ +#endif + uint64_t id; /* private guest value, echoed in resp */ +} __attribute__((__packed__)); + struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; + struct blkif_request_other other; } u; } __attribute__((__packed__)); diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h index 1844d31f4552..7000bb1f6e96 100644 --- a/include/xen/interface/physdev.h +++ b/include/xen/interface/physdev.h @@ -251,6 +251,12 @@ struct physdev_pci_device_add { #define PHYSDEVOP_pci_device_remove 26 #define PHYSDEVOP_restore_msi_ext 27 +/* + * Dom0 should use these two to announce MMIO resources assigned to + * MSI-X capable devices won't (prepare) or may (release) change. 
+ */ +#define PHYSDEVOP_prepare_msix 30 +#define PHYSDEVOP_release_msix 31 struct physdev_pci_device { /* IN */ uint16_t seg; diff --git a/init/Kconfig b/init/Kconfig index 22616cd434bc..5341d7232c3a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -28,10 +28,6 @@ config BUILDTIME_EXTABLE_SORT menu "General setup" -config EXPERIMENTAL - bool - default y - config BROKEN bool diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e5c4f609f22c..e4e47f647446 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - if (!(flags & MS_KERNMOUNT)) - data = current->nsproxy->ipc_ns; + if (!(flags & MS_KERNMOUNT)) { + struct ipc_namespace *ns = current->nsproxy->ipc_ns; + /* Don't allow mounting unless the caller has CAP_SYS_ADMIN + * over the ipc namespace. + */ + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + data = ns; + } return mount_ns(fs_type, flags, data, mqueue_fill_super); } @@ -840,7 +848,8 @@ out_putfd: fd = error; } mutex_unlock(&root->d_inode->i_mutex); - mnt_drop_write(mnt); + if (!ro) + mnt_drop_write(mnt); out_putname: putname(name); return fd; diff --git a/ipc/msg.c b/ipc/msg.c index 31cd1bf6af27..fede1d06ef30 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, goto out_unlock; break; } + msg = ERR_PTR(-EAGAIN); } else break; msg_counter++; diff --git a/kernel/.gitignore b/kernel/.gitignore index ab4f1090f437..b3097bde4e9c 100644 --- a/kernel/.gitignore +++ b/kernel/.gitignore @@ -4,3 +4,4 @@ config_data.h config_data.gz timeconst.h +hz.bc diff --git a/kernel/capability.c b/kernel/capability.c index 493d97259484..f6c2ce5701e1 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap) EXPORT_SYMBOL(ns_capable); /** + * file_ns_capable - Determine if the file's opener had a capability in effect + * @file: The file we want to check + * @ns: The usernamespace we want the capability in + * @cap: The capability to be tested for + * + * Return true if task that opened the file had a capability in effect + * when the file was opened. + * + * This does not set PF_SUPERPRIV because the caller may not + * actually be privileged. 
+ */ +bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) +{ + if (WARN_ON_ONCE(!cap_valid(cap))) + return false; + + if (security_capable(file->f_cred, ns, cap) == 0) + return true; + + return false; +} +EXPORT_SYMBOL(file_ns_capable); + +/** * capable - Determine if the current task has a superior capability in effect * @cap: The capability to be tested for * diff --git a/kernel/events/core.c b/kernel/events/core.c index b0cd86501c30..9fcb0944f071 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event) if (ctxn < 0) goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_task_ctx(ctx, task_event); } - if (ctx) - perf_event_task_ctx(ctx, task_event); next: put_cpu_ptr(pmu->pmu_cpu_context); } + if (task_event->task_ctx) + perf_event_task_ctx(task_event->task_ctx, task_event); + rcu_read_unlock(); } @@ -4593,6 +4596,7 @@ void perf_event_comm(struct task_struct *task) struct perf_event_context *ctx; int ctxn; + rcu_read_lock(); for_each_task_context_nr(ctxn) { ctx = task->perf_event_ctxp[ctxn]; if (!ctx) @@ -4600,6 +4604,7 @@ void perf_event_comm(struct task_struct *task) perf_event_enable_on_exec(ctx); } + rcu_read_unlock(); if (!atomic_read(&nr_comm_events)) return; @@ -4734,7 +4739,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) } else { if (arch_vma_name(mmap_event->vma)) { name = strncpy(tmp, arch_vma_name(mmap_event->vma), - sizeof(tmp)); + sizeof(tmp) - 1); + tmp[sizeof(tmp) - 1] = '\0'; goto got_name; } @@ -5327,7 +5333,7 @@ static void sw_perf_event_destroy(struct perf_event *event) static int perf_swevent_init(struct perf_event *event) { - int event_id = event->attr.config; + u64 event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; @@ -5647,6 +5653,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) event->attr.sample_period = NSEC_PER_SEC / freq; hwc->sample_period = event->attr.sample_period; local64_set(&hwc->period_left, hwc->sample_period); + hwc->last_period = hwc->sample_period; event->attr.freq = 0; } } @@ -5982,6 +5989,7 @@ skip_type: if (pmu->pmu_cpu_context) goto got_cpu_context; + ret = -ENOMEM; pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); if (!pmu->pmu_cpu_context) goto free_dev; diff --git a/kernel/events/internal.h b/kernel/events/internal.h index d56a64c99a8b..eb675c4d59df 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -16,7 +16,7 @@ struct ring_buffer { int page_order; /* allocation order */ #endif int nr_pages; /* nr of data pages */ - int writable; /* are we writable */ + int overwrite; /* can overwrite itself */ atomic_t poll; /* POLL_ for wakeups */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 23cb34ff3973..97fddb09762b 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -18,12 +18,24 @@ static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, unsigned long offset, unsigned long head) { - unsigned long mask; + unsigned long sz = perf_data_size(rb); + unsigned long mask = sz - 1; - if (!rb->writable) + /* + * check if user-writable + * overwrite : over-write its own tail + * !overwrite: buffer possibly drops events. 
+ */ + if (rb->overwrite) return true; - mask = perf_data_size(rb) - 1; + /* + * verify that payload is not bigger than buffer + * otherwise masking logic may fail to detect + * the "not enough space" condition + */ + if ((head - offset) > sz) + return false; offset = (offset - tail) & mask; head = (head - tail) & mask; @@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) rb->watermark = max_size / 2; if (flags & RING_BUFFER_WRITABLE) - rb->writable = 1; + rb->overwrite = 0; + else + rb->overwrite = 1; atomic_set(&rb->refcount, 1); diff --git a/kernel/exit.c b/kernel/exit.c index 51e485ca9935..60bc027c61c3 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -835,7 +835,7 @@ void do_exit(long code) /* * Make sure we are holding no locks: */ - debug_check_no_locks_held(); + debug_check_no_locks_held(tsk); /* * We can do this unlocked here. The futex code uses this flag * just to verify whether the pi state cleanup has been done diff --git a/kernel/fork.c b/kernel/fork.c index 8d932b1c9056..1766d324d5e3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1141,6 +1141,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) + return ERR_PTR(-EINVAL); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. @@ -1807,7 +1810,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) * If unsharing a user namespace must also unshare the thread. */ if (unshare_flags & CLONE_NEWUSER) - unshare_flags |= CLONE_THREAD; + unshare_flags |= CLONE_THREAD | CLONE_FS; /* * If unsharing a pid namespace must also unshare the thread. */ diff --git a/kernel/futex.c b/kernel/futex.c index f0090a993dab..b26dcfc02c94 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -223,7 +223,8 @@ static void drop_futex_key_refs(union futex_key *key) * @rw: mapping needs to be read/write (values: VERIFY_READ, * VERIFY_WRITE) * - * Returns a negative error code or 0 + * Return: a negative error code or 0 + * * The key words are stored in *key on success. * * For shared mappings, it's (page->index, file_inode(vma->vm_file), @@ -705,9 +706,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, * be "current" except in the case of requeue pi. * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * - * Returns: - * 0 - ready to wait - * 1 - acquired the lock + * Return: + * 0 - ready to wait; + * 1 - acquired the lock; * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. @@ -1191,9 +1192,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * - * Returns: - * 0 - failed to acquire the lock atomicly - * 1 - acquired the lock + * Return: + * 0 - failed to acquire the lock atomically; + * 1 - acquired the lock; * <0 - error */ static int futex_proxy_trylock_atomic(u32 __user *pifutex, @@ -1254,8 +1255,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire * uaddr2 atomically on behalf of the top waiter. 
* - * Returns: - * >=0 - on success, the number of tasks requeued or woken + * Return: + * >=0 - on success, the number of tasks requeued or woken; * <0 - on error */ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, @@ -1536,8 +1537,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must * be paired with exactly one earlier call to queue_me(). * - * Returns: - * 1 - if the futex_q was still queued (and we removed unqueued it) + * Return: + * 1 - if the futex_q was still queued (and we removed unqueued it); * 0 - if the futex_q was already removed by the waking thread */ static int unqueue_me(struct futex_q *q) @@ -1707,9 +1708,9 @@ static long futex_wait_restart(struct restart_block *restart); * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. * - * Returns: - * 1 - success, lock taken - * 0 - success, lock not taken + * Return: + * 1 - success, lock taken; + * 0 - success, lock not taken; * <0 - on error (-EFAULT) */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) @@ -1824,8 +1825,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, * Return with the hb lock held and a q.key reference on success, and unlocked * with no q.key reference on failure. * - * Returns: - * 0 - uaddr contains val and hb has been locked + * Return: + * 0 - uaddr contains val and hb has been locked; * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked */ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, @@ -2203,9 +2204,9 @@ pi_faulted: * the wakeup and return the appropriate error code to the caller. Must be * called with the hb lock held. * - * Returns - * 0 - no early wakeup detected - * <0 - -ETIMEDOUT or -ERESTARTNOINTR + * Return: + * 0 = no early wakeup detected; + * <0 = -ETIMEDOUT or -ERESTARTNOINTR */ static inline int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, @@ -2247,7 +2248,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * @val: the expected value of uaddr * @abs_time: absolute timeout * @bitset: 32 bit wakeup bitset set by userspace, defaults to all - * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to @@ -2258,7 +2258,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * there was a need to. * * We call schedule in futex_wait_queue_me() when we enqueue and return there - * via the following: + * via the following-- * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() * 2) wakeup on uaddr2 after a requeue * 3) signal @@ -2276,8 +2276,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * * If 4 or 7, we cleanup and return with -ETIMEDOUT. 
* - * Returns: - * 0 - On success + * Return: + * 0 - On success; * <0 - On error */ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index cc47812d3feb..14be27feda49 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -63,6 +63,7 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = { + .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), .clock_base = { { @@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; - raw_spin_lock_init(&cpu_base->lock); - for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); diff --git a/kernel/kexec.c b/kernel/kexec.c index bddd3d7a74b6..ffd4e111fd67 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -55,7 +55,7 @@ struct resource crashk_res = { .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; struct resource crashk_low_res = { - .name = "Crash kernel low", + .name = "Crash kernel", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM @@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char *cmdline, return 0; } +#define SUFFIX_HIGH 0 +#define SUFFIX_LOW 1 +#define SUFFIX_NULL 2 +static __initdata char *suffix_tbl[] = { + [SUFFIX_HIGH] = ",high", + [SUFFIX_LOW] = ",low", + [SUFFIX_NULL] = NULL, +}; + /* - * That function is the entry point for command line parsing and should be - * called from the arch-specific code. + * That function parses "suffix" crashkernel command lines like + * + * crashkernel=size,[high|low] + * + * It returns 0 on success and -EINVAL on failure. */ +static int __init parse_crashkernel_suffix(char *cmdline, + unsigned long long *crash_size, + unsigned long long *crash_base, + const char *suffix) +{ + char *cur = cmdline; + + *crash_size = memparse(cmdline, &cur); + if (cmdline == cur) { + pr_warn("crashkernel: memory value expected\n"); + return -EINVAL; + } + + /* check with suffix */ + if (strncmp(cur, suffix, strlen(suffix))) { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + cur += strlen(suffix); + if (*cur != ' ' && *cur != '\0') { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + + return 0; +} + +static __init char *get_last_crashkernel(char *cmdline, + const char *name, + const char *suffix) +{ + char *p = cmdline, *ck_cmdline = NULL; + + /* find crashkernel and use the last one if there are more */ + p = strstr(p, name); + while (p) { + char *end_p = strchr(p, ' '); + char *q; + + if (!end_p) + end_p = p + strlen(p); + + if (!suffix) { + int i; + + /* skip the one with any known suffix */ + for (i = 0; suffix_tbl[i]; i++) { + q = end_p - strlen(suffix_tbl[i]); + if (!strncmp(q, suffix_tbl[i], + strlen(suffix_tbl[i]))) + goto next; + } + ck_cmdline = p; + } else { + q = end_p - strlen(suffix); + if (!strncmp(q, suffix, strlen(suffix))) + ck_cmdline = p; + } +next: + p = strstr(p+1, name); + } + + if (!ck_cmdline) + return NULL; + + return ck_cmdline; +} + static int __init __parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base, - const char *name) + const char *name, + const char *suffix) { - char *p = cmdline, *ck_cmdline = NULL; char *first_colon, *first_space; + char *ck_cmdline; BUG_ON(!crash_size || !crash_base); *crash_size = 0; *crash_base = 0; - /* find crashkernel and use the last one if there are more */ - p = 
strstr(p, name); - while (p) { - ck_cmdline = p; - p = strstr(p+1, name); - } + ck_cmdline = get_last_crashkernel(cmdline, name, suffix); if (!ck_cmdline) return -EINVAL; ck_cmdline += strlen(name); + if (suffix) + return parse_crashkernel_suffix(ck_cmdline, crash_size, + crash_base, suffix); /* * if the commandline contains a ':', then that's the extended * syntax -- if not, it must be the classic syntax @@ -1413,13 +1492,26 @@ static int __init __parse_crashkernel(char *cmdline, return 0; } +/* + * That function is the entry point for command line parsing and should be + * called from the arch-specific code. + */ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base) { return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel="); + "crashkernel=", NULL); +} + +int __init parse_crashkernel_high(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, + "crashkernel=", suffix_tbl[SUFFIX_HIGH]); } int __init parse_crashkernel_low(char *cmdline, @@ -1428,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline, unsigned long long *crash_base) { return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel_low="); + "crashkernel=", suffix_tbl[SUFFIX_LOW]); } static void update_vmcoreinfo_note(void) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e35be53f6613..3fed7f0cbcdf 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -794,16 +794,16 @@ out: } #ifdef CONFIG_SYSCTL -/* This should be called with kprobe_mutex locked */ static void __kprobes optimize_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; unsigned int i; + mutex_lock(&kprobe_mutex); /* If optimization is already allowed, just return */ if (kprobes_allow_optimization) - return; + goto out; kprobes_allow_optimization = true; for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void) optimize_kprobe(p); } printk(KERN_INFO "Kprobes globally optimized\n"); +out: + mutex_unlock(&kprobe_mutex); } -/* This should be called with kprobe_mutex locked */ static void __kprobes unoptimize_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; unsigned int i; + mutex_lock(&kprobe_mutex); /* If optimization is already prohibited, just return */ - if (!kprobes_allow_optimization) + if (!kprobes_allow_optimization) { + mutex_unlock(&kprobe_mutex); return; + } kprobes_allow_optimization = false; for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void) unoptimize_kprobe(p, false); } } + mutex_unlock(&kprobe_mutex); + /* Wait for unoptimizing completion */ wait_for_kprobe_optimizer(); printk(KERN_INFO "Kprobes globally unoptimized\n"); } +static DEFINE_MUTEX(kprobe_sysctl_mutex); int sysctl_kprobes_optimization; int proc_kprobes_optimization_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, @@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, { int ret; - mutex_lock(&kprobe_mutex); + mutex_lock(&kprobe_sysctl_mutex); sysctl_kprobes_optimization = kprobes_allow_optimization ? 
1 : 0; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); @@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, optimize_all_kprobes(); else unoptimize_all_kprobes(); - mutex_unlock(&kprobe_mutex); + mutex_unlock(&kprobe_sysctl_mutex); return ret; } diff --git a/kernel/kthread.c b/kernel/kthread.c index 691dc2ef9baf..9eb7fed0bbaa 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task) static void __kthread_parkme(struct kthread *self) { - __set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_PARKED); while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) complete(&self->parked); schedule(); - __set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_PARKED); } clear_bit(KTHREAD_IS_PARKED, &self->flags); __set_current_state(TASK_RUNNING); @@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), } EXPORT_SYMBOL(kthread_create_on_node); -static void __kthread_bind(struct task_struct *p, unsigned int cpu) +static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state) { + /* Must have done schedule() in kthread() before we set_task_cpu */ + if (!wait_task_inactive(p, state)) { + WARN_ON(1); + return; + } /* It's safe because the task is inactive. */ do_set_cpus_allowed(p, cpumask_of(cpu)); p->flags |= PF_THREAD_BOUND; @@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu) */ void kthread_bind(struct task_struct *p, unsigned int cpu) { - /* Must have done schedule() in kthread() before we set_task_cpu */ - if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { - WARN_ON(1); - return; - } - __kthread_bind(p, cpu); + __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(kthread_bind); @@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k) return NULL; } +static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) +{ + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + /* + * We clear the IS_PARKED bit here as we don't wait + * until the task has left the park code. So if we'd + * park before that happens we'd see the IS_PARKED bit + * which might be about to be cleared. + */ + if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { + if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) + __kthread_bind(k, kthread->cpu, TASK_PARKED); + wake_up_state(k, TASK_PARKED); + } +} + /** * kthread_unpark - unpark a thread created by kthread_create(). * @k: thread created by kthread_create(). @@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k) { struct kthread *kthread = task_get_live_kthread(k); - if (kthread) { - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); - /* - * We clear the IS_PARKED bit here as we don't wait - * until the task has left the park code. So if we'd - * park before that happens we'd see the IS_PARKED bit - * which might be about to be cleared. 
- */ - if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { - if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) - __kthread_bind(k, kthread->cpu); - wake_up_process(k); - } - } + if (kthread) + __kthread_unpark(k, kthread); put_task_struct(k); } @@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k) trace_sched_kthread_stop(k); if (kthread) { set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + __kthread_unpark(k, kthread); wake_up_process(k); wait_for_completion(&kthread->exited); } diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 259db207b5d9..6a3bccba7e7d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -380,6 +380,13 @@ static int verbose(struct lock_class *class) unsigned long nr_stack_trace_entries; static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; +static void print_lockdep_off(const char *bug_msg) +{ + printk(KERN_DEBUG "%s\n", bug_msg); + printk(KERN_DEBUG "turning off the locking correctness validator.\n"); + printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n"); +} + static int save_trace(struct stack_trace *trace) { trace->nr_entries = 0; @@ -409,8 +416,7 @@ static int save_trace(struct stack_trace *trace) if (!debug_locks_off_graph_unlock()) return 0; - printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); - printk("turning off the locking correctness validator.\n"); + print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); dump_stack(); return 0; @@ -763,8 +769,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) } raw_local_irq_restore(flags); - printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); - printk("turning off the locking correctness validator.\n"); + print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); dump_stack(); return NULL; } @@ -834,8 +839,7 @@ static struct lock_list *alloc_list_entry(void) if (!debug_locks_off_graph_unlock()) return NULL; - printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); - printk("turning off the locking correctness validator.\n"); + print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!"); dump_stack(); return NULL; } @@ -2000,7 +2004,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, struct lock_class *class = hlock_class(hlock); struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; - struct held_lock *hlock_curr, *hlock_next; + struct held_lock *hlock_curr; int i, j; /* @@ -2048,8 +2052,7 @@ cache_hit: if (!debug_locks_off_graph_unlock()) return 0; - printk("BUG: MAX_LOCKDEP_CHAINS too low!\n"); - printk("turning off the locking correctness validator.\n"); + print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); dump_stack(); return 0; } @@ -2057,12 +2060,10 @@ cache_hit: chain->chain_key = chain_key; chain->irq_context = hlock->irq_context; /* Find the first held_lock of current chain */ - hlock_next = hlock; for (i = curr->lockdep_depth - 1; i >= 0; i--) { hlock_curr = curr->held_locks + i; - if (hlock_curr->irq_context != hlock_next->irq_context) + if (hlock_curr->irq_context != hlock->irq_context) break; - hlock_next = hlock; } i++; chain->depth = curr->lockdep_depth + 1 - i; @@ -3190,9 +3191,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, #endif if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { debug_locks_off(); - printk("BUG: MAX_LOCK_DEPTH too low, depth: %i max: %lu!\n", + print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); + printk(KERN_DEBUG "depth: %i max: %lu!\n", curr->lockdep_depth, MAX_LOCK_DEPTH); - 
printk("turning off the locking correctness validator.\n"); lockdep_print_held_locks(current); debug_show_all_locks(); @@ -4088,7 +4089,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); -static void print_held_locks_bug(void) +static void print_held_locks_bug(struct task_struct *curr) { if (!debug_locks_off()) return; @@ -4097,21 +4098,22 @@ static void print_held_locks_bug(void) printk("\n"); printk("=====================================\n"); - printk("[ BUG: %s/%d still has locks held! ]\n", - current->comm, task_pid_nr(current)); + printk("[ BUG: lock held at task exit time! ]\n"); print_kernel_ident(); printk("-------------------------------------\n"); - lockdep_print_held_locks(current); + printk("%s/%d is exiting with locks still held!\n", + curr->comm, task_pid_nr(curr)); + lockdep_print_held_locks(curr); + printk("\nstack backtrace:\n"); dump_stack(); } -void debug_check_no_locks_held(void) +void debug_check_no_locks_held(struct task_struct *task) { - if (unlikely(current->lockdep_depth > 0)) - print_held_locks_bug(); + if (unlikely(task->lockdep_depth > 0)) + print_held_locks_bug(task); } -EXPORT_SYMBOL_GPL(debug_check_no_locks_held); void debug_show_all_locks(void) { diff --git a/kernel/mutex.c b/kernel/mutex.c index 52f23011b6e0..ad53a664f113 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -37,6 +37,12 @@ # include <asm/mutex.h> #endif +/* + * A negative mutex count indicates that waiters are sleeping waiting for the + * mutex. + */ +#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0) + void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) { @@ -44,6 +50,9 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) spin_lock_init(&lock->wait_lock); INIT_LIST_HEAD(&lock->wait_list); mutex_clear_owner(lock); +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER + lock->spin_mlock = NULL; +#endif debug_mutex_init(lock, name, key); } @@ -95,6 +104,124 @@ void __sched mutex_lock(struct mutex *lock) EXPORT_SYMBOL(mutex_lock); #endif +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER +/* + * In order to avoid a stampede of mutex spinners from acquiring the mutex + * more or less simultaneously, the spinners need to acquire a MCS lock + * first before spinning on the owner field. + * + * We don't inline mspin_lock() so that perf can correctly account for the + * time spent in this lock function. 
+ */ +struct mspin_node { + struct mspin_node *next ; + int locked; /* 1 if lock acquired */ +}; +#define MLOCK(mutex) ((struct mspin_node **)&((mutex)->spin_mlock)) + +static noinline +void mspin_lock(struct mspin_node **lock, struct mspin_node *node) +{ + struct mspin_node *prev; + + /* Init node */ + node->locked = 0; + node->next = NULL; + + prev = xchg(lock, node); + if (likely(prev == NULL)) { + /* Lock acquired */ + node->locked = 1; + return; + } + ACCESS_ONCE(prev->next) = node; + smp_wmb(); + /* Wait until the lock holder passes the lock down */ + while (!ACCESS_ONCE(node->locked)) + arch_mutex_cpu_relax(); +} + +static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node) +{ + struct mspin_node *next = ACCESS_ONCE(node->next); + + if (likely(!next)) { + /* + * Release the lock by setting it to NULL + */ + if (cmpxchg(lock, node, NULL) == node) + return; + /* Wait until the next pointer is set */ + while (!(next = ACCESS_ONCE(node->next))) + arch_mutex_cpu_relax(); + } + ACCESS_ONCE(next->locked) = 1; + smp_wmb(); +} + +/* + * Mutex spinning code migrated from kernel/sched/core.c + */ + +static inline bool owner_running(struct mutex *lock, struct task_struct *owner) +{ + if (lock->owner != owner) + return false; + + /* + * Ensure we emit the owner->on_cpu, dereference _after_ checking + * lock->owner still matches owner, if that fails, owner might + * point to free()d memory, if it still matches, the rcu_read_lock() + * ensures the memory stays valid. + */ + barrier(); + + return owner->on_cpu; +} + +/* + * Look out! "owner" is an entirely speculative pointer + * access and not reliable. + */ +static noinline +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) +{ + rcu_read_lock(); + while (owner_running(lock, owner)) { + if (need_resched()) + break; + + arch_mutex_cpu_relax(); + } + rcu_read_unlock(); + + /* + * We break out the loop above on need_resched() and when the + * owner changed, which is a sign for heavy contention. Return + * success only when lock->owner is NULL. + */ + return lock->owner == NULL; +} + +/* + * Initial check for entering the mutex spinning loop + */ +static inline int mutex_can_spin_on_owner(struct mutex *lock) +{ + int retval = 1; + + rcu_read_lock(); + if (lock->owner) + retval = lock->owner->on_cpu; + rcu_read_unlock(); + /* + * if lock->owner is not set, the mutex owner may have just acquired + * it and not set the owner yet or the mutex has been released. + */ + return retval; +} +#endif + static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); /** @@ -158,25 +285,39 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * * We can't do this for DEBUG_MUTEXES because that relies on wait_lock * to serialize everything. + * + * The mutex spinners are queued up using MCS lock so that only one + * spinner can compete for the mutex. However, if mutex spinning isn't + * going to happen, there is no point in going through the lock/unlock + * overhead. */ + if (!mutex_can_spin_on_owner(lock)) + goto slowpath; for (;;) { struct task_struct *owner; + struct mspin_node node; /* * If there's an owner, wait for it to either * release the lock or go to sleep. 
*/ + mspin_lock(MLOCK(lock), &node); owner = ACCESS_ONCE(lock->owner); - if (owner && !mutex_spin_on_owner(lock, owner)) + if (owner && !mutex_spin_on_owner(lock, owner)) { + mspin_unlock(MLOCK(lock), &node); break; + } - if (atomic_cmpxchg(&lock->count, 1, 0) == 1) { + if ((atomic_read(&lock->count) == 1) && + (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { lock_acquired(&lock->dep_map, ip); mutex_set_owner(lock); + mspin_unlock(MLOCK(lock), &node); preempt_enable(); return 0; } + mspin_unlock(MLOCK(lock), &node); /* * When there's no owner, we might have preempted between the @@ -195,6 +336,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, */ arch_mutex_cpu_relax(); } +slowpath: #endif spin_lock_mutex(&lock->wait_lock, flags); @@ -205,7 +347,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, list_add_tail(&waiter.list, &lock->wait_list); waiter.task = task; - if (atomic_xchg(&lock->count, -1) == 1) + if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1)) goto done; lock_contended(&lock->dep_map, ip); @@ -220,7 +362,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * that when we release the lock, we properly wake up the * other waiters: */ - if (atomic_xchg(&lock->count, -1) == 1) + if (MUTEX_SHOW_NO_WAITER(lock) && + (atomic_xchg(&lock->count, -1) == 1)) break; /* diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index c1c3dc1c6023..bea15bdf82b0 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) int nr; int rc; struct task_struct *task, *me = current; + int init_pids = thread_group_leader(me) ? 1 : 2; /* Don't allow any more processes into the pid namespace */ disable_pid_allocation(pid_ns); @@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) */ for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); - if (pid_ns->nr_hashed == 1) + if (pid_ns->nr_hashed == init_pids) break; schedule(); } diff --git a/kernel/printk.c b/kernel/printk.c index 0b31715f335a..abbdd9e2ac82 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) 
#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ -DECLARE_WAIT_QUEUE_HEAD(log_wait); - int console_printk[4] = { DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ @@ -224,6 +222,7 @@ struct log { static DEFINE_RAW_SPINLOCK(logbuf_lock); #ifdef CONFIG_PRINTK +DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; static u32 syslog_idx; @@ -1957,45 +1956,6 @@ int is_console_locked(void) return console_locked; } -/* - * Delayed printk version, for scheduler-internal messages: - */ -#define PRINTK_BUF_SIZE 512 - -#define PRINTK_PENDING_WAKEUP 0x01 -#define PRINTK_PENDING_SCHED 0x02 - -static DEFINE_PER_CPU(int, printk_pending); -static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); - -static void wake_up_klogd_work_func(struct irq_work *irq_work) -{ - int pending = __this_cpu_xchg(printk_pending, 0); - - if (pending & PRINTK_PENDING_SCHED) { - char *buf = __get_cpu_var(printk_sched_buf); - printk(KERN_WARNING "[sched_delayed] %s", buf); - } - - if (pending & PRINTK_PENDING_WAKEUP) - wake_up_interruptible(&log_wait); -} - -static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { - .func = wake_up_klogd_work_func, - .flags = IRQ_WORK_LAZY, -}; - -void wake_up_klogd(void) -{ - preempt_disable(); - if (waitqueue_active(&log_wait)) { - this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); - irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); - } - preempt_enable(); -} - static void console_cont_flush(char *text, size_t size) { unsigned long flags; @@ -2458,6 +2418,44 @@ static int __init printk_late_init(void) late_initcall(printk_late_init); #if defined CONFIG_PRINTK +/* + * Delayed printk version, for scheduler-internal messages: + */ +#define PRINTK_BUF_SIZE 512 + +#define PRINTK_PENDING_WAKEUP 0x01 +#define PRINTK_PENDING_SCHED 0x02 + +static DEFINE_PER_CPU(int, printk_pending); +static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); + +static void wake_up_klogd_work_func(struct irq_work *irq_work) +{ + int pending = __this_cpu_xchg(printk_pending, 0); + + if (pending & PRINTK_PENDING_SCHED) { + char *buf = __get_cpu_var(printk_sched_buf); + printk(KERN_WARNING "[sched_delayed] %s", buf); + } + + if (pending & PRINTK_PENDING_WAKEUP) + wake_up_interruptible(&log_wait); +} + +static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { + .func = wake_up_klogd_work_func, + .flags = IRQ_WORK_LAZY, +}; + +void wake_up_klogd(void) +{ + preempt_disable(); + if (waitqueue_active(&log_wait)) { + this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); + irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); + } + preempt_enable(); +} int printk_sched(const char *fmt, ...) 
{ diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index 7890b10084a7..1d96dd0d93c1 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -14,6 +14,7 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/freezer.h> +#include <linux/stat.h> #include "rtmutex.h" @@ -366,8 +367,8 @@ static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *at return curr - buf; } -static DEVICE_ATTR(status, 0600, sysfs_test_status, NULL); -static DEVICE_ATTR(command, 0600, NULL, sysfs_test_command); +static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL); +static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command); static struct bus_type rttest_subsys = { .name = "rttest", diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index c685e31492df..c3ae1446461c 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd) u64 this_clock, remote_clock; u64 *ptr, old_val, val; +#if BITS_PER_LONG != 64 +again: + /* + * Careful here: The local and the remote clock values need to + * be read out atomic as we need to compare the values and + * then update either the local or the remote side. So the + * cmpxchg64 below only protects one readout. + * + * We must reread via sched_clock_local() in the retry case on + * 32bit as an NMI could use sched_clock_local() via the + * tracer and hit between the readout of + * the low32bit and the high 32bit portion. + */ + this_clock = sched_clock_local(my_scd); + /* + * We must enforce atomic readout on 32bit, otherwise the + * update on the remote cpu can hit inbetween the readout of + * the low32bit and the high 32bit portion. + */ + remote_clock = cmpxchg64(&scd->clock, 0, 0); +#else + /* + * On 64bit the read of [my]scd->clock is atomic versus the + * update, so we can avoid the above 32bit dance. + */ sched_clock_local(my_scd); again: this_clock = my_scd->clock; remote_clock = scd->clock; +#endif /* * Use the opportunity that we have both locks diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f12624a393c..42053547e0f5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p) { struct rq *rq = task_rq(p); - BUG_ON(rq != this_rq()); - BUG_ON(p == current); + if (WARN_ON_ONCE(rq != this_rq()) || + WARN_ON_ONCE(p == current)) + return; + lockdep_assert_held(&rq->lock); if (!raw_spin_trylock(&p->pi_lock)) { @@ -2997,51 +2999,6 @@ void __sched schedule_preempt_disabled(void) preempt_disable(); } -#ifdef CONFIG_MUTEX_SPIN_ON_OWNER - -static inline bool owner_running(struct mutex *lock, struct task_struct *owner) -{ - if (lock->owner != owner) - return false; - - /* - * Ensure we emit the owner->on_cpu, dereference _after_ checking - * lock->owner still matches owner, if that fails, owner might - * point to free()d memory, if it still matches, the rcu_read_lock() - * ensures the memory stays valid. - */ - barrier(); - - return owner->on_cpu; -} - -/* - * Look out! "owner" is an entirely speculative pointer - * access and not reliable. - */ -int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) -{ - if (!sched_feat(OWNER_SPIN)) - return 0; - - rcu_read_lock(); - while (owner_running(lock, owner)) { - if (need_resched()) - break; - - arch_mutex_cpu_relax(); - } - rcu_read_unlock(); - - /* - * We break out the loop above on need_resched() and when the - * owner changed, which is a sign for heavy contention. 
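[Editor's note] The kernel/sched/clock.c hunk above reads the remote CPU's 64-bit clock with cmpxchg64(&scd->clock, 0, 0) because a plain 64-bit load is not atomic on 32-bit machines. A hedged C11 equivalent of that trick, assuming the platform provides a lock-free 64-bit compare-exchange; the helper name is made up:

#include <stdatomic.h>
#include <stdint.h>

static uint64_t demo_atomic_read64(_Atomic uint64_t *p)
{
	uint64_t expected = 0;

	/* If *p happens to be 0 this stores 0 back (a no-op); either way the
	 * current value of *p is handed back in 'expected' in one atomic step. */
	atomic_compare_exchange_strong(p, &expected, 0);
	return expected;
}

The value 0 is arbitrary; the point is that a compare-exchange always returns the old contents atomically, whereas two separate 32-bit loads could straddle a concurrent update.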
Return - * success only when lock->owner is NULL. - */ - return lock->owner == NULL; -} -#endif - #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption @@ -4999,7 +4956,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) } static int min_load_idx = 0; -static int max_load_idx = CPU_LOAD_IDX_MAX; +static int max_load_idx = CPU_LOAD_IDX_MAX-1; static void set_table_entry(struct ctl_table *entry, diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ed12cbb135f4..e93cca92f38b 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) t = tsk; do { - task_cputime(tsk, &utime, &stime); + task_cputime(t, &utime, &stime); times->utime += utime; times->stime += stime; times->sum_exec_runtime += task_sched_runtime(t); diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 1ad1d2b5395f..99399f8e4799 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -46,13 +46,6 @@ SCHED_FEAT(DOUBLE_TICK, false) SCHED_FEAT(LB_BIAS, true) /* - * Spin-wait on mutex acquisition when the mutex owner is running on - * another cpu -- assumes that when the owner is running, it will soon - * release the lock. Decreases scheduling overhead. - */ -SCHED_FEAT(OWNER_SPIN, true) - -/* * Decrement CPU power based on time not spent running tasks */ SCHED_FEAT(NONTASK_POWER, true) diff --git a/kernel/signal.c b/kernel/signal.c index 2ec870a4c3c4..598dc06be421 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default) if (force_default || ka->sa.sa_handler != SIG_IGN) ka->sa.sa_handler = SIG_DFL; ka->sa.sa_flags = 0; +#ifdef __ARCH_HAS_SA_RESTORER + ka->sa.sa_restorer = NULL; +#endif sigemptyset(&ka->sa.sa_mask); ka++; } @@ -2682,7 +2685,7 @@ static int do_sigpending(void *set, unsigned long sigsetsize) /** * sys_rt_sigpending - examine a pending signal that has been raised * while blocked - * @set: stores pending signals + * @uset: stores pending signals * @sigsetsize: size of sigset_t type or larger */ SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) @@ -2945,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info; + struct siginfo info = {}; info.si_signo = sig; info.si_errno = 0; diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 8eaed9aa9cf0..02fc5c933673 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) } get_task_struct(tsk); *per_cpu_ptr(ht->store, cpu) = tsk; - if (ht->create) - ht->create(cpu); + if (ht->create) { + /* + * Make sure that the task has actually scheduled out + * into park position, before calling the create + * callback. At least the migration thread callback + * requires that the task is off the runqueue. 
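[Editor's note] The kernel/signal.c change earlier in this hunk turns "struct siginfo info;" into "struct siginfo info = {};" so that padding and unused union members later copied to userspace are defined rather than stale stack data. A minimal userspace-style illustration of the same pattern; the struct layout and names are invented, and memset() is used so every byte, padding included, is cleared:

#include <string.h>

struct demo_info {		/* stand-in for a record later copied out wholesale */
	int signo;
	int code;
	char payload[48];	/* most callers only fill a couple of fields */
};

void demo_fill(void *out, int signo)
{
	struct demo_info info;

	memset(&info, 0, sizeof(info));	/* every byte defined, padding included */
	info.signo = signo;
	info.code = 1;			/* illustrative value only */

	memcpy(out, &info, sizeof(info));	/* no stale stack bytes can escape */
}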
+ */ + if (!wait_task_inactive(tsk, TASK_PARKED)) + WARN_ON(1); + else + ht->create(cpu); + } return 0; } diff --git a/kernel/sys.c b/kernel/sys.c index 81f56445fba9..0da73cf73e60 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd) system_state = SYSTEM_RESTART; usermodehelper_disable(); device_shutdown(); - syscore_shutdown(); } /** @@ -370,6 +369,7 @@ void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); disable_nonboot_cpus(); + syscore_shutdown(); if (!cmd) printk(KERN_EMERG "Restarting system.\n"); else @@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); + disable_nonboot_cpus(); syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); @@ -2185,9 +2186,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; -static int __orderly_poweroff(void) +static int __orderly_poweroff(bool force) { - int argc; char **argv; static char *envp[] = { "HOME=/", @@ -2196,20 +2196,40 @@ static int __orderly_poweroff(void) }; int ret; - argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); - if (argv == NULL) { + argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); + if (argv) { + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); + argv_free(argv); + } else { printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", - __func__, poweroff_cmd); - return -ENOMEM; + __func__, poweroff_cmd); + ret = -ENOMEM; } - ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, - NULL, NULL, NULL); - argv_free(argv); + if (ret && force) { + printk(KERN_WARNING "Failed to start orderly shutdown: " + "forcing the issue\n"); + /* + * I guess this should try to kick off some daemon to sync and + * poweroff asap. Or not even bother syncing if we're doing an + * emergency shutdown? + */ + emergency_sync(); + kernel_power_off(); + } return ret; } +static bool poweroff_force; + +static void poweroff_work_func(struct work_struct *work) +{ + __orderly_poweroff(poweroff_force); +} + +static DECLARE_WORK(poweroff_work, poweroff_work_func); + /** * orderly_poweroff - Trigger an orderly system poweroff * @force: force poweroff if command execution fails @@ -2219,21 +2239,9 @@ static int __orderly_poweroff(void) */ int orderly_poweroff(bool force) { - int ret = __orderly_poweroff(); - - if (ret && force) { - printk(KERN_WARNING "Failed to start orderly shutdown: " - "forcing the issue\n"); - - /* - * I guess this should try to kick off some daemon to sync and - * poweroff asap. Or not even bother syncing if we're doing an - * emergency shutdown? 
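[Editor's note] The kernel/sys.c hunk above stops running the usermode helper directly from orderly_poweroff(); it latches the "force" flag and schedules a work item, so callers in atomic context no longer reach code that can sleep (GFP_KERNEL allocation, call_usermodehelper with UMH_WAIT_EXEC). Below is a small sketch of that defer-to-workqueue shape; the demo_* names are invented and the work function body is a placeholder.

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static bool demo_force;		/* latched request; only ever set, never cleared */

static void demo_poweroff_work_func(struct work_struct *work)
{
	/* Process context: allocating memory or waiting for a helper is fine here. */
	pr_warn("demo: running deferred shutdown, force=%d\n", demo_force);
}

static DECLARE_WORK(demo_poweroff_work, demo_poweroff_work_func);

int demo_request_poweroff(bool force)
{
	if (force)			/* never override a pending "true" */
		demo_force = true;
	schedule_work(&demo_poweroff_work);	/* safe even from atomic context */
	return 0;
}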
- */ - emergency_sync(); - kernel_power_off(); - } - - return ret; + if (force) /* do not override the pending "true" */ + poweroff_force = true; + schedule_work(&poweroff_work); + return 0; } EXPORT_SYMBOL_GPL(orderly_poweroff); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 2fb8cb88df8d..7f32fe0e52cd 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc) */ int tick_check_broadcast_device(struct clock_event_device *dev) { - if ((tick_broadcast_device.evtdev && + if ((dev->features & CLOCK_EVT_FEAT_DUMMY) || + (tick_broadcast_device.evtdev && tick_broadcast_device.evtdev->rating >= dev->rating) || (dev->features & CLOCK_EVT_FEAT_C3STOP)) return 0; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 192473b22799..fc382d6e2765 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -414,24 +414,28 @@ config PROBE_EVENTS def_bool n config DYNAMIC_FTRACE - bool "enable/disable ftrace tracepoints dynamically" + bool "enable/disable function tracing dynamically" depends on FUNCTION_TRACER depends on HAVE_DYNAMIC_FTRACE default y help - This option will modify all the calls to ftrace dynamically - (will patch them out of the binary image and replace them - with a No-Op instruction) as they are called. A table is - created to dynamically enable them again. + This option will modify all the calls to function tracing + dynamically (will patch them out of the binary image and + replace them with a No-Op instruction) on boot up. During + compile time, a table is made of all the locations that ftrace + can function trace, and this table is linked into the kernel + image. When this is enabled, functions can be individually + enabled, and the functions not enabled will not affect + performance of the system. + + See the files in /sys/kernel/debug/tracing: + available_filter_functions + set_ftrace_filter + set_ftrace_notrace This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise has native performance as long as no tracing is active. - The changes to the code are done by a kernel thread that - wakes up once a second and checks to see if any ftrace calls - were made. If so, it runs stop_machine (stops all CPUS) - and modifies the code to jump over the call to ftrace. - config DYNAMIC_FTRACE_WITH_REGS def_bool y depends on DYNAMIC_FTRACE diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 9e5b8c272eec..5a0f781cd729 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -739,12 +739,6 @@ static void blk_add_trace_rq_complete(void *ignore, struct request_queue *q, struct request *rq) { - struct blk_trace *bt = q->blk_trace; - - /* if control ever passes through here, it's a request based driver */ - if (unlikely(bt && !bt->rq_based)) - bt->rq_based = true; - blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); } @@ -780,24 +774,10 @@ static void blk_add_trace_bio_bounce(void *ignore, blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); } -static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error) +static void blk_add_trace_bio_complete(void *ignore, + struct request_queue *q, struct bio *bio, + int error) { - struct request_queue *q; - struct blk_trace *bt; - - if (!bio->bi_bdev) - return; - - q = bdev_get_queue(bio->bi_bdev); - bt = q->blk_trace; - - /* - * Request based drivers will generate both rq and bio completions. - * Ignore bio ones. 
- */ - if (likely(!bt) || bt->rq_based) - return; - blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ab25b88aae56..b3fde6d7b7fc 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -66,7 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = { .func = ftrace_stub, - .flags = FTRACE_OPS_FL_RECURSION_SAFE, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, }; /* ftrace_enabled is a method to turn ftrace on or off */ @@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) free_page(tmp); } - free_page((unsigned long)stat->pages); stat->pages = NULL; stat->start = NULL; @@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) static struct pid * const ftrace_swapper_pid = &init_struct_pid; +loff_t +ftrace_filter_lseek(struct file *file, loff_t offset, int whence) +{ + loff_t ret; + + if (file->f_mode & FMODE_READ) + ret = seq_lseek(file, offset, whence); + else + file->f_pos = ret = 1; + + return ret; +} + #ifdef CONFIG_DYNAMIC_FTRACE #ifndef CONFIG_FTRACE_MCOUNT_RECORD @@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash) * routine, you can use ftrace_filter_write() for the write * routine if @flag has FTRACE_ITER_FILTER set, or * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. - * ftrace_regex_lseek() should be used as the lseek routine, and + * ftrace_filter_lseek() should be used as the lseek routine, and * release must call ftrace_regex_release(). */ int @@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file) inode, file); } -loff_t -ftrace_regex_lseek(struct file *file, loff_t offset, int whence) -{ - loff_t ret; - - if (file->f_mode & FMODE_READ) - ret = seq_lseek(file, offset, whence); - else - file->f_pos = ret = 1; - - return ret; -} - static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; @@ -3104,8 +3103,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, continue; } - hlist_del(&entry->node); - call_rcu(&entry->rcu, ftrace_free_entry_rcu); + hlist_del_rcu(&entry->node); + call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); } } __disable_ftrace_function_probe(); @@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; static int __init set_ftrace_notrace(char *str) { - strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); + strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_notrace=", set_ftrace_notrace); static int __init set_ftrace_filter(char *str) { - strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); + strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_filter=", set_ftrace_filter); @@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_regex_release, }; @@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = { .open = ftrace_notrace_open, .read = seq_read, .write = ftrace_notrace_write, - .llseek = ftrace_regex_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_regex_release, }; @@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = { .open = ftrace_graph_open, .read = seq_read, .write = ftrace_graph_write, + .llseek = ftrace_filter_lseek, .release = 
ftrace_graph_release, - .llseek = seq_lseek, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, preempt_disable_notrace(); trace_recursion_set(TRACE_CONTROL_BIT); do_for_each_ftrace_op(op, ftrace_control_list) { - if (!ftrace_function_local_disabled(op) && + if (!(op->flags & FTRACE_OPS_FL_STUB) && + !ftrace_function_local_disabled(op) && ftrace_ops_test(op, ip)) op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); @@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = { .open = ftrace_pid_open, .write = ftrace_pid_write, .read = seq_read, - .llseek = seq_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_pid_release, }; @@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_ops_list != &ftrace_list_end) { - if (ftrace_ops_list->next == &ftrace_list_end) - ftrace_trace_function = ftrace_ops_list->func; - else - ftrace_trace_function = ftrace_ops_list_func; - } + if (ftrace_ops_list != &ftrace_list_end) + update_ftrace_function(); } else { /* stopping ftrace calls (just send to ftrace_stub) */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c2e2c2310374..66338c4f7f4b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -132,7 +132,7 @@ static char *default_bootup_tracer; static int __init set_cmdline_ftrace(char *str) { - strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); + strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = 1; @@ -162,7 +162,7 @@ static char *trace_boot_options __initdata; static int __init set_trace_boot_options(char *str) { - strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); + strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); trace_boot_options = trace_boot_options_buf; return 0; } @@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct ring_buffer *buf = tr->buffer; + struct ring_buffer *buf; if (trace_stop_count) return; @@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) arch_spin_lock(&ftrace_max_lock); + buf = tr->buffer; tr->buffer = max_tr.buffer; max_tr.buffer = buf; @@ -743,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) + if (!current_trace->allocated_snapshot) { + /* Only the nop tracer should hit this when disabling */ + WARN_ON_ONCE(current_trace != &nop_trace); return; + } arch_spin_lock(&ftrace_max_lock); @@ -2400,6 +2404,27 @@ static void test_ftrace_alive(struct seq_file *m) seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); } +#ifdef CONFIG_TRACER_MAX_TRACE +static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) +{ + if (iter->trace->allocated_snapshot) + seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); + else + seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); + + seq_printf(m, "# Snapshot commands:\n"); + seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); + seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); + seq_printf(m, "# Takes a snapshot of the main buffer.\n"); + seq_printf(m, "# echo 2 > 
snapshot : Clears snapshot buffer (but does not allocate)\n"); + seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); + seq_printf(m, "# is not a '0' or '1')\n"); +} +#else +/* Should never be called */ +static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } +#endif + static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; @@ -2411,7 +2436,9 @@ static int s_show(struct seq_file *m, void *v) seq_puts(m, "#\n"); test_ftrace_alive(m); } - if (iter->trace && iter->trace->print_header) + if (iter->snapshot && trace_empty(iter)) + print_snapshot_help(m, iter); + else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); @@ -2857,11 +2884,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) return -EINVAL; } -static void set_tracer_flags(unsigned int mask, int enabled) +/* Some tracers require overwrite to stay enabled */ +int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) +{ + if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) + return -1; + + return 0; +} + +int set_tracer_flag(unsigned int mask, int enabled) { /* do nothing if flag is already set */ if (!!(trace_flags & mask) == !!enabled) - return; + return 0; + + /* Give the tracer a chance to approve the change */ + if (current_trace->flag_changed) + if (current_trace->flag_changed(current_trace, mask, !!enabled)) + return -EINVAL; if (enabled) trace_flags |= mask; @@ -2871,18 +2912,24 @@ static void set_tracer_flags(unsigned int mask, int enabled) if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); - if (mask == TRACE_ITER_OVERWRITE) + if (mask == TRACE_ITER_OVERWRITE) { ring_buffer_change_overwrite(global_trace.buffer, enabled); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_change_overwrite(max_tr.buffer, enabled); +#endif + } if (mask == TRACE_ITER_PRINTK) trace_printk_start_stop_comm(enabled); + + return 0; } static int trace_set_options(char *option) { char *cmp; int neg = 0; - int ret = 0; + int ret = -ENODEV; int i; cmp = strstrip(option); @@ -2892,19 +2939,20 @@ static int trace_set_options(char *option) cmp += 2; } + mutex_lock(&trace_types_lock); + for (i = 0; trace_options[i]; i++) { if (strcmp(cmp, trace_options[i]) == 0) { - set_tracer_flags(1 << i, !neg); + ret = set_tracer_flag(1 << i, !neg); break; } } /* If no option could be set, test the specific tracer options */ - if (!trace_options[i]) { - mutex_lock(&trace_types_lock); + if (!trace_options[i]) ret = set_tracer_option(current_trace, cmp, neg); - mutex_unlock(&trace_types_lock); - } + + mutex_unlock(&trace_types_lock); return ret; } @@ -2914,6 +2962,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; + int ret; if (cnt >= sizeof(buf)) return -EINVAL; @@ -2923,7 +2972,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, buf[cnt] = 0; - trace_set_options(buf); + ret = trace_set_options(buf); + if (ret < 0) + return ret; *ppos += cnt; @@ -3227,6 +3278,9 @@ static int tracing_set_tracer(const char *buf) goto out; trace_branch_disable(); + + current_trace->enabled = false; + if (current_trace->reset) current_trace->reset(tr); @@ -3271,6 +3325,7 @@ static int tracing_set_tracer(const char *buf) } current_trace = t; + current_trace->enabled = true; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); @@ -4144,8 +4199,6 @@ tracing_snapshot_write(struct file *filp, const 
char __user *ubuf, size_t cnt, default: if (current_trace->allocated_snapshot) tracing_reset_online_cpus(&max_tr); - else - ret = -EINVAL; break; } @@ -4759,7 +4812,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, if (val != 0 && val != 1) return -EINVAL; - set_tracer_flags(1 << index, val); + + mutex_lock(&trace_types_lock); + ret = set_tracer_flag(1 << index, val); + mutex_unlock(&trace_types_lock); + + if (ret < 0) + return ret; *ppos += cnt; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 57d7e5397d56..2081971367ea 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -283,11 +283,15 @@ struct tracer { enum print_line_t (*print_line)(struct trace_iterator *iter); /* If you handled the flag setting, return 0 */ int (*set_flag)(u32 old_flags, u32 bit, int set); + /* Return 0 if OK with change, else return non-zero */ + int (*flag_changed)(struct tracer *tracer, + u32 mask, int set); struct tracer *next; struct tracer_flags *flags; bool print_max; bool use_max_tr; bool allocated_snapshot; + bool enabled; }; @@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[]; void trace_printk_init_buffers(void); void trace_printk_start_comm(void); +int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); +int set_tracer_flag(unsigned int mask, int enabled); #undef FTRACE_ENTRY #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 713a2cac4881..443b25b43b4f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -32,7 +32,7 @@ enum { static int trace_type __read_mostly; -static int save_lat_flag; +static int save_flags; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); @@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) static void __irqsoff_tracer_init(struct trace_array *tr) { - save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; - trace_flags |= TRACE_ITER_LATENCY_FMT; + save_flags = trace_flags; + + /* non overwrite screws up the latency tracers */ + set_tracer_flag(TRACE_ITER_OVERWRITE, 1); + set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); tracing_max_latency = 0; irqsoff_trace = tr; @@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr) static void irqsoff_tracer_reset(struct trace_array *tr) { + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; + stop_irqsoff_tracer(tr, is_graph()); - if (!save_lat_flag) - trace_flags &= ~TRACE_ITER_LATENCY_FMT; + set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); + set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); } static void irqsoff_tracer_start(struct trace_array *tr) @@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly = .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_irqsoff, #endif @@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly = .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptoff, #endif @@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = .print_line = irqsoff_print_line, .flags = 
&tracer_flags, .set_flag = irqsoff_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptirqsoff, #endif diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 75aa97fbe1a1..fde652c9a511 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr); static int wakeup_graph_entry(struct ftrace_graph_ent *trace); static void wakeup_graph_return(struct ftrace_graph_ret *trace); -static int save_lat_flag; +static int save_flags; #define TRACE_DISPLAY_GRAPH 1 @@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr) static int __wakeup_tracer_init(struct trace_array *tr) { - save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; - trace_flags |= TRACE_ITER_LATENCY_FMT; + save_flags = trace_flags; + + /* non overwrite screws up the latency tracers */ + set_tracer_flag(TRACE_ITER_OVERWRITE, 1); + set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); tracing_max_latency = 0; wakeup_trace = tr; @@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr) static void wakeup_tracer_reset(struct trace_array *tr) { + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; + stop_wakeup_tracer(tr); /* make sure we put back any tasks we are tracing */ wakeup_reset(tr); - if (!save_lat_flag) - trace_flags &= ~TRACE_ITER_LATENCY_FMT; + set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); + set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); } static void wakeup_tracer_start(struct trace_array *tr) @@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly = .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif @@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = .print_line = wakeup_print_line, .flags = &tracer_flags, .set_flag = wakeup_set_flag, + .flag_changed = trace_keep_overwrite, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 42ca822fc701..83a8b5b7bd35 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = { .open = stack_trace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_regex_release, }; diff --git a/kernel/user.c b/kernel/user.c index e81978e8c03b..8e635a18ab52 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -51,6 +51,8 @@ struct user_namespace init_user_ns = { .owner = GLOBAL_ROOT_UID, .group = GLOBAL_ROOT_GID, .proc_inum = PROC_USER_INIT_INO, + .may_mount_sysfs = true, + .may_mount_proc = true, }; EXPORT_SYMBOL_GPL(init_user_ns); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 8b650837083e..e134d8f365dd 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -21,10 +21,12 @@ #include <linux/uaccess.h> #include <linux/ctype.h> #include <linux/projid.h> +#include <linux/fs_struct.h> static struct kmem_cache *user_ns_cachep __read_mostly; -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int 
cap_setid, struct uid_gid_map *map); static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) @@ -60,6 +62,15 @@ int create_user_ns(struct cred *new) kgid_t group = new->egid; int ret; + /* + * Verify that we can not violate the policy of which files + * may be accessed that is specified by the root directory, + * by verifying that the root directory is at the root of the + * mount namespace which allows all files to be accessed. + */ + if (current_chrooted()) + return -EPERM; + /* The creator needs a mapping in the parent user namespace * or else we won't be able to reasonably tell userspace who * created a user_namespace. @@ -86,6 +97,8 @@ int create_user_ns(struct cred *new) set_cred_user_ns(new, ns); + update_mnt_policy(ns); + return 0; } @@ -600,10 +613,10 @@ static ssize_t map_write(struct file *file, const char __user *buf, if (map->nr_extents != 0) goto out; - /* Require the appropriate privilege CAP_SETUID or CAP_SETGID - * over the user namespace in order to set the id mapping. + /* + * Adjusting namespace settings requires capabilities on the target. */ - if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) goto out; /* Get a buffer */ @@ -688,7 +701,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, ret = -EPERM; /* Validate the user is allowed to use user id's mapped to. */ - if (!new_idmap_permitted(ns, cap_setid, &new_map)) + if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the @@ -775,7 +788,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t &ns->projid_map, &ns->parent->projid_map); } -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int cap_setid, struct uid_gid_map *new_map) { /* Allow mapping to your own filesystem ids */ @@ -783,12 +797,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, u32 id = new_map->extent[0].lower_first; if (cap_setid == CAP_SETUID) { kuid_t uid = make_kuid(ns->parent, id); - if (uid_eq(uid, current_fsuid())) + if (uid_eq(uid, file->f_cred->fsuid)) return true; } else if (cap_setid == CAP_SETGID) { kgid_t gid = make_kgid(ns->parent, id); - if (gid_eq(gid, current_fsgid())) + if (gid_eq(gid, file->f_cred->fsgid)) return true; } } @@ -799,8 +813,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, /* Allow the specified ids if we have the appropriate capability * (CAP_SETUID or CAP_SETGID) over the parent user namespace. + * And the opener of the id file also had the appropriate capability.
*/ - if (ns_capable(ns->parent, cap_setid)) + if (ns_capable(ns->parent, cap_setid) && + file_ns_capable(file, ns->parent, cap_setid)) return true; return false; @@ -837,6 +853,9 @@ static int userns_install(struct nsproxy *nsproxy, void *ns) if (atomic_read(¤t->mm->mm_users) > 1) return -EINVAL; + if (current->fs->users != 1) + return -EINVAL; + if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 81f2457811eb..b48cd597145d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool) int ret; mutex_lock(&worker_pool_idr_mutex); - idr_pre_get(&worker_pool_idr, GFP_KERNEL); - ret = idr_get_new(&worker_pool_idr, pool, &pool->id); + ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); + if (ret >= 0) + pool->id = ret; mutex_unlock(&worker_pool_idr_mutex); - return ret; + return ret < 0 ? ret : 0; } /* @@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work) spin_unlock_irq(&pool->lock); mutex_unlock(&pool->assoc_mutex); - } - /* - * Call schedule() so that we cross rq->lock and thus can guarantee - * sched callbacks see the %WORKER_UNBOUND flag. This is necessary - * as scheduler callbacks may be invoked from other cpus. - */ - schedule(); + /* + * Call schedule() so that we cross rq->lock and thus can + * guarantee sched callbacks see the %WORKER_UNBOUND flag. + * This is necessary as scheduler callbacks may be invoked + * from other cpus. + */ + schedule(); - /* - * Sched callbacks are disabled now. Zap nr_running. After this, - * nr_running stays zero and need_more_worker() and keep_working() - * are always true as long as the worklist is not empty. Pools on - * @cpu now behave as unbound (in terms of concurrency management) - * pools which are served by workers tied to the CPU. - * - * On return from this function, the current worker would trigger - * unbound chain execution of pending work items if other workers - * didn't already. - */ - for_each_std_worker_pool(pool, cpu) + /* + * Sched callbacks are disabled now. Zap nr_running. + * After this, nr_running stays zero and need_more_worker() + * and keep_working() are always true as long as the + * worklist is not empty. This pool now behaves as an + * unbound (in terms of concurrency management) pool which + * are served by workers tied to the pool. + */ atomic_set(&pool->nr_running, 0); + + /* + * With concurrency management just turned off, a busy + * worker blocking could lead to lengthy stalls. Kick off + * unbound chain execution of currently pending work items. + */ + spin_lock_irq(&pool->lock); + wake_up_worker(pool); + spin_unlock_irq(&pool->lock); + } } /* diff --git a/lib/Kconfig b/lib/Kconfig index 3958dc4389f9..fe01d418b09a 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -404,4 +404,7 @@ config OID_REGISTRY help Enable fast lookup object identifier registry. 
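[Editor's note] The kernel/workqueue.c hunk just above replaces the old idr_pre_get()/idr_get_new() pair with the newer idr_alloc() interface that lib/idr.c reworks below. A hedged sketch of the usual calling convention follows; the demo_idr, demo_lock and demo_register() names are invented, and the idr_preload() bracket is the general pattern for callers that must allocate under a spinlock, not what workqueue.c itself does (it simply passes GFP_KERNEL under a mutex).

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

int demo_register(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep: refill the per-cpu layer cache */
	spin_lock(&demo_lock);

	/* start = 0, end = 0 means "any non-negative id"; errors come back as -errno */
	id = idr_alloc(&demo_idr, object, 0, 0, GFP_NOWAIT);

	spin_unlock(&demo_lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}

Compared with the old two-step API there is no -EAGAIN retry loop; an allocation failure is reported directly through the return value.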
+config UCS2_STRING + tristate + endmenu diff --git a/lib/Makefile b/lib/Makefile index d7946ff75b2e..6e2cc561f761 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN $@ cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ clean-files += oid_registry_data.c + +obj-$(CONFIG_UCS2_STRING) += ucs2_string.o diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c index 9681d54b95d1..f8e0e5367398 100644 --- a/lib/bust_spinlocks.c +++ b/lib/bust_spinlocks.c @@ -8,6 +8,7 @@ */ #include <linux/kernel.h> +#include <linux/printk.h> #include <linux/spinlock.h> #include <linux/tty.h> #include <linux/wait.h> @@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes) wake_up_klogd(); } } - - diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5e396accd3d0..d87a17a819d0 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref) entry = bucket_find_exact(bucket, ref); if (!entry) { + /* must drop lock before calling dma_mapping_error */ + put_hash_bucket(bucket, &flags); + if (dma_mapping_error(ref->dev, ref->dev_addr)) { err_printk(ref->dev, NULL, - "DMA-API: device driver tries " - "to free an invalid DMA memory address\n"); - return; + "DMA-API: device driver tries to free an " + "invalid DMA memory address\n"); + } else { + err_printk(ref->dev, NULL, + "DMA-API: device driver tries to free DMA " + "memory it has not allocated [device " + "address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); } - err_printk(ref->dev, NULL, "DMA-API: device driver tries " - "to free DMA memory it has not allocated " - "[device address=0x%016llx] [size=%llu bytes]\n", - ref->dev_addr, ref->size); - goto out; + return; } if (ref->size != entry->size) { @@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref) hash_bucket_del(entry); dma_entry_free(entry); -out: put_hash_bucket(bucket, &flags); } @@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ref.dev = dev; ref.dev_addr = dma_addr; bucket = get_hash_bucket(&ref, &flags); - entry = bucket_find_exact(bucket, &ref); - if (!entry) - goto out; + list_for_each_entry(entry, &bucket->list, list) { + if (!exact_match(&ref, entry)) + continue; + + /* + * The same physical address can be mapped multiple + * times. Without a hardware IOMMU this results in the + * same device addresses being put into the dma-debug + * hash multiple times too. This can result in false + * positives being reported. Therefore we implement a + * best-fit algorithm here which updates the first entry + * from the hash which fits the reference value and is + * not currently listed as being checked. + */ + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + entry->map_err_type = MAP_ERR_CHECKED; + break; + } + } - entry->map_err_type = MAP_ERR_CHECKED; -out: put_hash_bucket(bucket, &flags); } EXPORT_SYMBOL(debug_dma_mapping_error); diff --git a/lib/idr.c b/lib/idr.c index 00739aaf95a2..322e2816f2fb 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) if (layer_idr) return get_from_free_list(layer_idr); - /* try to allocate directly from kmem_cache */ - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); + /* + * Try to allocate directly from kmem_cache. We want to try this + * before preload buffer; otherwise, non-preloading idr_alloc() + * users will end up taking advantage of preloading ones. 
As the + * following is allowed to fail for preloaded cases, suppress + * warning this time. + */ + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); if (new) return new; @@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) * Try to fetch one from the per-cpu preload buffer if in process * context. See idr_preload() for details. */ - if (in_interrupt()) - return NULL; - - preempt_disable(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; + if (!in_interrupt()) { + preempt_disable(); + new = __this_cpu_read(idr_preload_head); + if (new) { + __this_cpu_write(idr_preload_head, new->ary[0]); + __this_cpu_dec(idr_preload_cnt); + new->ary[0] = NULL; + } + preempt_enable(); + if (new) + return new; } - preempt_enable(); - return new; + + /* + * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so + * that memory allocation failure warning is printed as intended. + */ + return kmem_cache_zalloc(idr_layer_cache, gfp_mask); } static void idr_layer_rcu_free(struct rcu_head *head) @@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) } } -/** - * idr_pre_get - reserve resources for idr allocation - * @idp: idr handle - * @gfp_mask: memory allocation flags - * - * This function should be called prior to calling the idr_get_new* functions. - * It preallocates enough memory to satisfy the worst possible allocation. The - * caller should pass in GFP_KERNEL if possible. This of course requires that - * no spinning locks be held. - * - * If the system is REALLY out of memory this function returns %0, - * otherwise %1. - */ -int idr_pre_get(struct idr *idp, gfp_t gfp_mask) +int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) { while (idp->id_free_cnt < MAX_IDR_FREE) { struct idr_layer *new; @@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) } return 1; } -EXPORT_SYMBOL(idr_pre_get); +EXPORT_SYMBOL(__idr_pre_get); /** * sub_alloc - try to allocate an id without growing the tree depth * @idp: idr handle * @starting_id: id to start search at - * @id: pointer to the allocated handle * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer * @gfp_mask: allocation mask for idr_layer_alloc() * @layer_idr: optional idr passed to idr_layer_alloc() @@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id, idr_mark_full(pa, id); } -/** - * idr_get_new_above - allocate new idr entry above or equal to a start id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @starting_id: id to start search at - * @id: pointer to the allocated handle - * - * This is the allocate id function. It should be called with any - * required locks. - * - * If allocation from IDR's private freelist fails, idr_get_new_above() will - * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill - * IDR's preallocation and then retry the idr_get_new_above() call. - * - * If the idr is full idr_get_new_above() will return %-ENOSPC. - * - * @id returns a value in the range @starting_id ... 
%0x7fffffff - */ -int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) +int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) { struct idr_layer *pa[MAX_IDR_LEVEL + 1]; int rv; @@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) *id = rv; return 0; } -EXPORT_SYMBOL(idr_get_new_above); +EXPORT_SYMBOL(__idr_get_new_above); /** * idr_preload - preload for idr_alloc() @@ -908,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) int ida_pre_get(struct ida *ida, gfp_t gfp_mask) { /* allocate idr_layers */ - if (!idr_pre_get(&ida->idr, gfp_mask)) + if (!__idr_pre_get(&ida->idr, gfp_mask)) return 0; /* allocate free_bitmap */ diff --git a/lib/kobject.c b/lib/kobject.c index e07ee1fcd6f1..a65486613d79 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj) return kobj; } +static struct kobject *kobject_get_unless_zero(struct kobject *kobj) +{ + if (!kref_get_unless_zero(&kobj->kref)) + kobj = NULL; + return kobj; +} + /* * kobject_cleanup - free kobject resources. * @kobj: object to cleanup @@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name) list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { - ret = kobject_get(k); + ret = kobject_get_unless_zero(k); break; } } diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bfe02b8fc55b..d23762e6652c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str) if (!strcmp(str, "force")) swiotlb_force = 1; - return 1; + return 0; } -__setup("swiotlb=", setup_io_tlb_npages); +early_param("swiotlb", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) @@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void) return io_tlb_nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); + +/* default to 64MB */ +#define IO_TLB_DEFAULT_SIZE (64UL<<20) +unsigned long swiotlb_size_or_default(void) +{ + unsigned long size; + + size = io_tlb_nslabs << IO_TLB_SHIFT; + + return size ? 
size : (IO_TLB_DEFAULT_SIZE); +} + /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) @@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { - /* default to 64MB */ - size_t default_size = 64UL<<20; + size_t default_size = IO_TLB_DEFAULT_SIZE; unsigned char *vstart; unsigned long bytes; diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c new file mode 100644 index 000000000000..6f500ef2301d --- /dev/null +++ b/lib/ucs2_string.c @@ -0,0 +1,51 @@ +#include <linux/ucs2_string.h> +#include <linux/module.h> + +/* Return the number of unicode characters in data */ +unsigned long +ucs2_strnlen(const ucs2_char_t *s, size_t maxlength) +{ + unsigned long length = 0; + + while (*s++ != 0 && length < maxlength) + length++; + return length; +} +EXPORT_SYMBOL(ucs2_strnlen); + +unsigned long +ucs2_strlen(const ucs2_char_t *s) +{ + return ucs2_strnlen(s, ~0UL); +} +EXPORT_SYMBOL(ucs2_strlen); + +/* + * Return the length of this string, in bytes + * Note: this is NOT the same as the number of unicode characters + */ +unsigned long +ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength) +{ + return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t); +} +EXPORT_SYMBOL(ucs2_strsize); + +int +ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) +{ + while (1) { + if (len == 0) + return 0; + if (*a < *b) + return -1; + if (*a > *b) + return 1; + if (*a == 0) /* implies *b == 0 */ + return 0; + a++; + b++; + len--; + } +} +EXPORT_SYMBOL(ucs2_strncmp); diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig index 82a04d7ba99e..08837db52d94 100644 --- a/lib/xz/Kconfig +++ b/lib/xz/Kconfig @@ -15,7 +15,7 @@ config XZ_DEC_X86 config XZ_DEC_POWERPC bool "PowerPC BCJ filter decoder" - default y if POWERPC + default y if PPC select XZ_DEC_BCJ config XZ_DEC_IA64 diff --git a/mm/Kconfig b/mm/Kconfig index ae55c1e04d10..3bea74f1ccfe 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -286,8 +286,12 @@ config NR_QUICK default "1" config VIRT_TO_BUS - def_bool y - depends on HAVE_VIRT_TO_BUS + bool + help + An architecture should select this if it implements the + deprecated interface virt_to_bus(). All new architectures + should probably not select this.
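[Editor's note] The new lib/ucs2_string.c above adds length, size and compare helpers for 16-bit UCS-2 strings; the likely consumers in this series are firmware (EFI variable) paths where names are stored in UCS-2. A small usage sketch, with the demo_* name and buffer sizes invented:

#include <linux/types.h>
#include <linux/ucs2_string.h>

/* True when 'name' (at most max_chars characters long) equals 'wanted'. */
static bool demo_ucs2_name_equals(const ucs2_char_t *name, size_t max_chars,
				  const ucs2_char_t *wanted)
{
	unsigned long len = ucs2_strnlen(name, max_chars);	/* characters, not bytes */

	if (len != ucs2_strlen(wanted))
		return false;

	/* ucs2_strsize() would report 2 * len here: handy when sizing a copy. */
	return ucs2_strncmp(name, wanted, len) == 0;
}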
+ config MMU_NOTIFIER bool diff --git a/mm/fremap.c b/mm/fremap.c index 0cd4c11488ed..87da3590c61e 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, struct vm_area_struct *vma; int err = -EINVAL; int has_write_lock = 0; - vm_flags_t vm_flags; + vm_flags_t vm_flags = 0; if (prot) return err; @@ -204,10 +204,8 @@ get_write_lock: unsigned long addr; struct file *file = get_file(vma->vm_file); - vm_flags = vma->vm_flags; - if (!(flags & MAP_NONBLOCK)) - vm_flags |= VM_POPULATE; - addr = mmap_region(file, start, size, vm_flags, pgoff); + addr = mmap_region(file, start, size, + vma->vm_flags, pgoff); fput(file); if (IS_ERR_VALUE(addr)) { err = addr; @@ -226,12 +224,6 @@ get_write_lock: mutex_unlock(&mapping->i_mmap_mutex); } - if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) { - if (!has_write_lock) - goto get_write_lock; - vma->vm_flags |= VM_POPULATE; - } - if (vma->vm_flags & VM_LOCKED) { /* * drop PG_Mlocked flag for over-mapped range @@ -254,7 +246,8 @@ get_write_lock: */ out: - vm_flags = vma->vm_flags; + if (vma) + vm_flags = vma->vm_flags; if (likely(!has_write_lock)) up_read(&mm->mmap_sem); else diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a0be33bb199..1a12f5b9a0ab 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf) /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ unsigned long hugetlb_total_pages(void) { - struct hstate *h = &default_hstate; - return h->nr_huge_pages * pages_per_huge_page(h); + struct hstate *h; + unsigned long nr_total_pages = 0; + + for_each_hstate(h) + nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); + return nr_total_pages; } static int hugetlb_acct_memory(struct hstate *h, long delta) @@ -2957,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } - if (absent || + /* + * We need call hugetlb_fault for both hugepages under migration + * (in which case hugetlb_fault waits for the migration,) and + * hwpoisoned hugepages (in which case we need to prevent the + * caller from accessing to them.) In order to do this, we use + * here is_swap_pte instead of is_hugetlb_entry_migration and + * is_hugetlb_entry_hwpoisoned. This is because it simply covers + * both cases, and because we can't follow correct pages + * directly from any kind of swap entries. + */ + if (absent || is_swap_pte(huge_ptep_get(pte)) || ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; diff --git a/mm/memory.c b/mm/memory.c index 494526ae024a..ba94dec5b259 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->mm = mm; tlb->fullmm = fullmm; + tlb->need_flush_all = 0; tlb->start = -1UL; tlb->end = 0; tlb->need_flush = 0; @@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +/** + * vm_iomap_memory - remap memory to userspace + * @vma: user vma to map to + * @start: start of area + * @len: size of area + * + * This is a simplified io_remap_pfn_range() for common driver use. The + * driver just needs to give us the physical memory range to be mapped, + * we'll figure out the rest from the vma information. + * + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get + * whatever write-combining details or similar. 
+ */ +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ + unsigned long vm_len, pfn, pages; + + /* Check that the physical memory area passed in looks valid */ + if (start + len < start) + return -EINVAL; + /* + * You *really* shouldn't map things that aren't page-aligned, + * but we've historically allowed it because IO memory might + * just have smaller alignment. + */ + len += start & ~PAGE_MASK; + pfn = start >> PAGE_SHIFT; + pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; + if (pfn + pages < pfn) + return -EINVAL; + + /* We start the mapping 'vm_pgoff' pages into the area */ + if (vma->vm_pgoff > pages) + return -EINVAL; + pfn += vma->vm_pgoff; + pages -= vma->vm_pgoff; + + /* Can we fit all of the mapping? */ + vm_len = vma->vm_end - vma->vm_start; + if (vm_len >> PAGE_SHIFT > pages) + return -EINVAL; + + /* Ok, let it rip */ + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b81a367b9f39..ee3765760818 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1779,7 +1779,11 @@ void try_offline_node(int nid) for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; - if (zone->wait_table) + /* + * wait_table may be allocated from boot memory, + * here only free if it's allocated by vmalloc. + */ + if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } @@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size) int retry = 1; start_pfn = PFN_DOWN(start); - end_pfn = start_pfn + PFN_DOWN(size); + end_pfn = PFN_UP(start + size - 1); /* * When CONFIG_MEMCG is on, one memory block may be used by other diff --git a/mm/mlock.c b/mm/mlock.c index 1c5e33fce639..79b7cf7d1bca 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on) newflags = vma->vm_flags & ~VM_LOCKED; if (on) - newflags |= VM_LOCKED | VM_POPULATE; + newflags |= VM_LOCKED; tmp = vma->vm_end; if (tmp > end) @@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) * range with the first VMA. Also, skip undesirable VMA types. 
*/ nend = min(end, vma->vm_end); - if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) != - VM_POPULATE) + if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; @@ -492,9 +491,9 @@ static int do_mlockall(int flags) struct vm_area_struct * vma, * prev = NULL; if (flags & MCL_FUTURE) - current->mm->def_flags |= VM_LOCKED | VM_POPULATE; + current->mm->def_flags |= VM_LOCKED; else - current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE); + current->mm->def_flags &= ~VM_LOCKED; if (flags == MCL_FUTURE) goto out; @@ -503,7 +502,7 @@ static int do_mlockall(int flags) newflags = vma->vm_flags & ~VM_LOCKED; if (flags & MCL_CURRENT) - newflags |= VM_LOCKED | VM_POPULATE; + newflags |= VM_LOCKED; /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); diff --git a/mm/mmap.c b/mm/mmap.c index 2664a47cec93..0db0de1c2fbe 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, } addr = mmap_region(file, addr, len, vm_flags, pgoff); - if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE)) + if (!IS_ERR_VALUE(addr) && + ((vm_flags & VM_LOCKED) || + (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } @@ -1938,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) /* Check the cache first. */ /* (Cache hit rate is typically around 35%.) */ - vma = mm->mmap_cache; + vma = ACCESS_ONCE(mm->mmap_cache); if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { struct rb_node *rb_node; diff --git a/mm/nommu.c b/mm/nommu.c index e19328087534..e001768b14e8 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) struct vm_area_struct *vma; /* check the cache first */ - vma = mm->mmap_cache; + vma = ACCESS_ONCE(mm->mmap_cache); if (vma && vma->vm_start <= addr && vma->vm_end > addr) return vma; @@ -1838,6 +1838,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ + unsigned long pfn = start >> PAGE_SHIFT; + unsigned long vm_len = vma->vm_end - vma->vm_start; + + pfn += vma->vm_pgoff; + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); + int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 926b46649749..fd26d0433509 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid, if (flags != 0) return -EINVAL; - if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec))) - goto out; - - if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec))) - goto out; - if (vm_write) rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV, iovstack_l, @@ -459,8 +453,6 @@ free_iovecs: kfree(iov_r); if (iov_l != iovstack_l) kfree(iov_l); - -out: return rc; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 88c5fed8b9a4..669fba39be1a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3188,9 +3188,9 @@ int kswapd_run(int nid) if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); - pgdat->kswapd = NULL; pr_err("Failed to start kswapd on node %d\n", nid); ret = PTR_ERR(pgdat->kswapd); + pgdat->kswapd = NULL; } return ret; } 
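[Editor's note] The mm/memory.c hunk earlier in this chunk (with a nommu counterpart in mm/nommu.c just above) adds vm_iomap_memory() so a driver can hand the whole physical window to the MM code instead of open-coding offset and length checks around io_remap_pfn_range(). A sketch of how a driver's mmap handler might use it; the demo_dev structure and its fields are invented for illustration.

#include <linux/fs.h>
#include <linux/mm.h>

struct demo_dev {
	phys_addr_t regs_phys;	/* physical base of the device's register window */
	unsigned long regs_len;	/* size of that window in bytes */
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_dev *dev = file->private_data;

	/* Registers, not RAM: most drivers ask for an uncached mapping first. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* The helper validates alignment, vm_pgoff and length against the window. */
	return vm_iomap_memory(vma, dev->regs_phys, dev->regs_len);
}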
diff --git a/net/802/mrp.c b/net/802/mrp.c index a4cc3229952a..e085bcc754f6 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) * all pending messages before the applicant is gone. */ del_timer_sync(&app->join_timer); + + spin_lock(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); mrp_pdu_queue(app); + spin_unlock(&app->lock); + mrp_queue_xmit(app); dev_mc_del(dev, appl->group_address); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a18714469bf7..85addcd9372b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp = &vlan_info->grp; - /* Take it out of our own structures, but be sure to interlock with - * HW accelerating devices or SW vlan input packet processing if - * VLAN is not 0 (leave it there for 802.1p). - */ - if (vlan_id) - vlan_vid_del(real_dev, vlan_id); - grp->nr_vlan_devs--; if (vlan->flags & VLAN_FLAG_MVRP) @@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) vlan_gvrp_uninit_applicant(real_dev); } + /* Take it out of our own structures, but be sure to interlock with + * HW accelerating devices or SW vlan input packet processing if + * VLAN is not 0 (leave it there for 802.1p). + */ + if (vlan_id) + vlan_vid_del(real_dev, vlan_id); + /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4..737bef59ce89 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, struct sk_buff *skb; int copied, error = -EINVAL; + msg->msg_namelen = 0; + if (sock->state != SS_CONNECTED) return -ENOTCONN; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc5071..e277e38f736b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, ax25_address src; const unsigned char *mac = skb_mac_header(skb); + memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a0b253ecadaf..a5bb0a769eb9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; /* unpack the aggregated packets and process them one by one */ - do { + while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, + batadv_ogm_packet->tt_num_changes)) { tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, @@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, packet_pos = packet_buff + buff_pos; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; - } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, - batadv_ogm_packet->tt_num_changes)); + } kfree_skb(skb); return NET_RX_SUCCESS; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 0488d70c8c35..fa563e497c48 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface) atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); } -int batadv_is_my_mac(const uint8_t *addr) +int batadv_is_my_mac(struct 
batadv_priv *bat_priv, const uint8_t *addr) { const struct batadv_hard_iface *hard_iface; @@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr) if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; + if (hard_iface->soft_iface != bat_priv->soft_iface) + continue; + if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { rcu_read_unlock(); return 1; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ced08b936a96..d40910dfc8ea 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue; int batadv_mesh_init(struct net_device *soft_iface); void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(const uint8_t *addr); +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_hard_iface * batadv_seq_print_text_primary_if_get(struct seq_file *seq); int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5ee21cebbbb0..319f2906c71a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, goto out; /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) goto out; icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; @@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, } /* packet for me */ - if (batadv_is_my_mac(icmp_packet->dst)) + if (batadv_is_my_mac(bat_priv, icmp_packet->dst)) return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); /* TTL exceeded */ @@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig, return router; } -static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) +static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, + struct sk_buff *skb, int hdr_size) { struct ethhdr *ethhdr; @@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) return -1; /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) return -1; return 0; @@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) char tt_flag; size_t packet_size; - if (batadv_check_unicast_packet(skb, hdr_size) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) return NET_RX_DROP; /* I could need to modify it */ @@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) case BATADV_TT_RESPONSE: batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); - if (batadv_is_my_mac(tt_query->dst)) { + if (batadv_is_my_mac(bat_priv, tt_query->dst)) { /* packet needs to be linearized to access the TT * changes */ @@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) struct batadv_roam_adv_packet *roam_adv_packet; struct batadv_orig_node *orig_node; - if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, + sizeof(*roam_adv_packet)) < 0) goto out; batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; - if (!batadv_is_my_mac(roam_adv_packet->dst)) + if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst)) return batadv_route_unicast_packet(skb, recv_if); /* check if it is a backbone gateway. 
we don't accept @@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, * last time) the packet had an updated information or not */ curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); - if (!batadv_is_my_mac(unicast_packet->dest)) { + if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) { orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest); /* if it is not possible to find the orig_node representing the @@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, if (is4addr) hdr_size = sizeof(*unicast_4addr_packet); - if (batadv_check_unicast_packet(skb, hdr_size) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) return NET_RX_DROP; if (!batadv_check_unicast_ttvn(bat_priv, skb)) return NET_RX_DROP; /* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { + if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { if (is4addr) { batadv_dat_inc_counter(bat_priv, unicast_4addr_packet->subtype); @@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, struct sk_buff *new_skb = NULL; int ret; - if (batadv_check_unicast_packet(skb, hdr_size) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) return NET_RX_DROP; if (!batadv_check_unicast_ttvn(bat_priv, skb)) @@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, unicast_packet = (struct batadv_unicast_frag_packet *)skb->data; /* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { + if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); if (ret == NET_RX_DROP) @@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, goto out; /* ignore broadcasts sent by myself */ - if (batadv_is_my_mac(ethhdr->h_source)) + if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto out; bcast_packet = (struct batadv_bcast_packet *)skb->data; /* ignore broadcasts originated by myself */ - if (batadv_is_my_mac(bcast_packet->orig)) + if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) goto out; if (bcast_packet->header.ttl < 2) @@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb, ethhdr = (struct ethhdr *)skb_mac_header(skb); /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) return NET_RX_DROP; /* ignore own packets */ - if (batadv_is_my_mac(vis_packet->vis_orig)) + if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig)) return NET_RX_DROP; - if (batadv_is_my_mac(vis_packet->sender_orig)) + if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig)) return NET_RX_DROP; switch (vis_packet->vis_type) { diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 98a66a021a60..7abee19567e9 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1953,7 +1953,7 @@ out: bool batadv_send_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_request) { - if (batadv_is_my_mac(tt_request->dst)) { + if (batadv_is_my_mac(bat_priv, tt_request->dst)) { /* don't answer backbone gws! */ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) return true; diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c053244b97bd..6a1e646be96d 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, /* Are we the target for this VIS packet? 
*/ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && - batadv_is_my_mac(vis_packet->target_orig)) + batadv_is_my_mac(bat_priv, vis_packet->target_orig)) are_target = 1; spin_lock_bh(&bat_priv->vis.hash_lock); @@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, batadv_send_list_add(bat_priv, info); /* ... we're not the recipient (and thus need to forward). */ - } else if (!batadv_is_my_mac(packet->target_orig)) { + } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) { batadv_send_list_add(bat_priv, info); } diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a78..0d1b08cc76e1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (flags & (MSG_OOB)) return -EOPNOTSUPP; + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } - msg->msg_namelen = 0; - copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263b..7c9224bcce17 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); + msg->msg_namelen = 0; return 0; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 79d87d8d4f51..fb6192c9812e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk) sco_chan_del(sk, ECONNRESET); break; + case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); @@ -664,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hci_conn_accept(pi->conn->hcon, 0); sk->sk_state = BT_CONFIG; + msg->msg_namelen = 0; release_sock(sk); return 0; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b28..314c73ed418f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b0812c91c0f0..bab338e6270d 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return 0; br_warn(br, "adding interface %s with same address " "as a received packet\n", - source->dev->name); + source ? 
source->dev->name : br->dev->name); fdb_delete(br, fdb); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ef1b91431c6b..459dab22b3f6 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p) struct net_device *dev = p->dev; struct net_bridge *br = p->br; - if (netif_running(dev) && netif_oper_up(dev)) + if (!(p->flags & BR_ADMIN_COST) && + netif_running(dev) && netif_oper_up(dev)) p->path_cost = port_cost(dev); if (!netif_running(br->dev)) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 480330151898..828e2bcc1f52 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb) if (is_broadcast_ether_addr(dest)) skb2 = skb; else if (is_multicast_ether_addr(dest)) { - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { if ((mdst && mdst->mglist) || br_multicast_is_router(br)) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc65..ee79f3f20383 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, port = p->port; if (port) { struct br_mdb_entry e; + memset(&e, 0, sizeof(e)); e.ifindex = port->dev->ifindex; e.state = p->state; if (p->addr.proto == htons(ETH_P_IP)) @@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) break; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->ifindex = dev->ifindex; if (br_mdb_fill_info(skb, cb, dev) < 0) goto out; @@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, return -EMSGSIZE; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->family = AF_BRIDGE; bpm->ifindex = dev->ifindex; nest = nla_nest_start(skb, MDBA_MDB); @@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, { struct br_mdb_entry entry; + memset(&entry, 0, sizeof(entry)); entry.ifindex = port->dev->ifindex; entry.addr.proto = group->proto; entry.addr.u.ip4 = group->u.ip4; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb62..923fbeaf7afd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( #endif struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); struct br_ip ip; @@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, return NULL; ip.proto = skb->protocol; + ip.vid = vid; switch (skb->protocol) { case htons(ETH_P_IP): diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 27aa3ee517ce..299fc5f40a26 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_MODE */ + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + 0; } @@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); + br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); 
if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bcb..d2c043a857b6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -156,6 +156,7 @@ struct net_bridge_port #define BR_BPDU_GUARD 0x00000002 #define BR_ROOT_BLOCK 0x00000004 #define BR_MULTICAST_FAST_LEAVE 0x00000008 +#define BR_ADMIN_COST 0x00000010 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING u32 multicast_startup_queries_sent; @@ -442,7 +443,7 @@ extern int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb); extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb); + struct sk_buff *skb, u16 vid); extern void br_multicast_add_port(struct net_bridge_port *port); extern void br_multicast_del_port(struct net_bridge_port *port); extern void br_multicast_enable_port(struct net_bridge_port *port); @@ -504,7 +505,7 @@ static inline int br_multicast_rcv(struct net_bridge *br, } static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { return NULL; } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 0bdb4ebd362b..d45e760141bb 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost) path_cost > BR_MAX_PATH_COST) return -ERANGE; + p->flags |= BR_ADMIN_COST; p->path_cost = path_cost; br_configuration_update(p->br); br_port_state_selection(p->br); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f83902..ff2ff3ce6965 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebea..117814a7e73c 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb, if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); } } } @@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void) hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); } } @@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); err = 0; break; } diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 69bc4bf89e3e..4543b9aba40c 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) return 0; } +static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) +{ + u8 v; + + ceph_decode_need(p, end, 1+8+4+4, bad); + v = ceph_decode_8(p); + if (v != 1) + goto bad; + pg->pool = ceph_decode_64(p); + pg->seed = ceph_decode_32(p); + *p += 4; /* skip preferred */ + return 0; + +bad: + dout("error decoding pgid\n"); + return -EINVAL; +} + /* * decode a full map. 
*/ @@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) for (i = 0; i < len; i++) { int n, j; struct ceph_pg pgid; - struct ceph_pg_v1 pgid_v1; struct ceph_pg_mapping *pg; - ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); - pgid.pool = le32_to_cpu(pgid_v1.pool); - pgid.seed = le16_to_cpu(pgid_v1.ps); + err = __decode_pgid(p, end, &pgid); + if (err) + goto bad; + ceph_decode_need(p, end, sizeof(u32), bad); n = ceph_decode_32(p); err = -EINVAL; if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) @@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, u16 version; ceph_decode_16_safe(p, end, version, bad); - if (version > 6) { - pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); + if (version != 6) { + pr_warning("got unknown v %d != 6 of inc osdmap\n", version); goto bad; } @@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, while (len--) { struct ceph_pg_mapping *pg; int j; - struct ceph_pg_v1 pgid_v1; struct ceph_pg pgid; u32 pglen; - ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); - pgid.pool = le32_to_cpu(pgid_v1.pool); - pgid.seed = le16_to_cpu(pgid_v1.ps); - pglen = ceph_decode_32(p); + err = __decode_pgid(p, end, &pgid); + if (err) + goto bad; + ceph_decode_need(p, end, sizeof(u32), bad); + pglen = ceph_decode_32(p); if (pglen) { ceph_decode_need(p, end, pglen*sizeof(u32), bad); diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..b24ab0e98eb4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1545,7 +1545,6 @@ void net_enable_timestamp(void) return; } #endif - WARN_ON(in_interrupt()); static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); @@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) } skb_orphan(skb); - nf_reset(skb); if (unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); @@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) skb->mark = 0; secpath_reset(skb); nf_reset(skb); + nf_reset_trace(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -2149,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) struct net_device *dev = skb->dev; const char *driver = ""; + if (!net_ratelimit()) + return; + if (dev && dev->dev.parent) driver = dev_driver_string(dev->dev.parent); @@ -2219,9 +2221,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_offload *ptype; __be16 type = skb->protocol; + int vlan_depth = ETH_HLEN; while (type == htons(ETH_P_8021Q)) { - int vlan_depth = ETH_HLEN; struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) @@ -3315,6 +3317,7 @@ int netdev_rx_handler_register(struct net_device *dev, if (dev->rx_handler) return -EBUSY; + /* Note: rx_handler_data must be set before rx_handler */ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); rcu_assign_pointer(dev->rx_handler, rx_handler); @@ -3335,6 +3338,11 @@ void netdev_rx_handler_unregister(struct net_device *dev) ASSERT_RTNL(); RCU_INIT_POINTER(dev->rx_handler, NULL); + /* a reader seeing a non NULL rx_handler in a rcu_read_lock() + * section has a guarantee to see a non NULL rx_handler_data + * as well. 
+ */ + synchronize_net(); RCU_INIT_POINTER(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); @@ -3444,6 +3452,7 @@ ncls: } switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: + ret = NET_RX_SUCCESS; goto unlock; case RX_HANDLER_ANOTHER: goto another_round; @@ -4103,7 +4112,7 @@ static void net_rx_action(struct softirq_action *h) * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ - if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) + if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) goto softnet_break; local_irq_enable(); @@ -4780,7 +4789,7 @@ EXPORT_SYMBOL(dev_set_mac_address); /** * dev_change_carrier - Change device carrier * @dev: device - * @new_carries: new value + * @new_carrier: new value * * Change device carrier */ diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e369..abdc9e6ef33e 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, ha->type = addr_type; ha->refcount = 1; ha->global_use = global; - ha->synced = false; + ha->synced = 0; list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, addr_len, ha->type); if (err) break; - ha->synced = true; + ha->synced++; ha->refcount++; } else if (ha->refcount == 1) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); @@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, if (ha->synced) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); - ha->synced = false; + ha->synced--; __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c7..2bfd081c59f7 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) struct flow_flush_info *info = data; struct tasklet_struct *tasklet; - tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); + tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; tasklet->data = (unsigned long)info; tasklet_schedule(tasklet); } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9d4c7201400d..e187bf06d673 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -140,6 +140,8 @@ ipv6: flow->ports = *ports; } + flow->thoff = (u16) nhoff; + return true; } EXPORT_SYMBOL(skb_flow_dissect); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..23854b51a259 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) } if (ops->fill_info) { data = nla_nest_start(skb, IFLA_INFO_DATA); - if (data == NULL) + if (data == NULL) { + err = -EMSGSIZE; goto err_cancel_link; + } err = ops->fill_info(skb, dev); if (err < 0) goto err_cancel_data; @@ -979,6 +981,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, * report anything. 
*/ ivi.spoofchk = -1; + memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = @@ -1069,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); cb->seq = net->dev_base_seq; - if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) @@ -1919,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0; - if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@ -2620,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); while (RTA_OK(attr, attrlen)) { - unsigned int flavor = attr->rta_type; + unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; if (flavor) { if (flavor > rta_max[sz_idx]) return -EINVAL; diff --git a/net/core/scm.c b/net/core/scm.c index 905dcc6ad1e3..2dc6cdaaae8a 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/security.h> +#include <linux/pid_namespace.h> #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/slab.h> @@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds) if (!uid_valid(uid) || !gid_valid(gid)) return -EINVAL; - if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && + if ((creds->pid == task_tgid_vnr(current) || + ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf80..21291f1abcd6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; + memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); @@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) @@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; + memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, @@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) @@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); 
if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) @@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) @@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; + memset(&pg, 0, sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) @@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpfc) { struct cee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3f..bba5f8336317 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -84,7 +84,7 @@ (memcmp(addr1, addr2, length >> 3) == 0) /* local link, i.e. FE80::/10 */ -#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) /* * check whether we can compress the IID to 16 bits, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f7661..c929d9c1c4b6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, iph->frag_off |= htons(IP_MF); offset += (skb->len - skb->mac_len - iph->ihl * 4); } else { - if (!(iph->frag_off & htons(IP_DF))) - iph->id = htons(id++); + iph->id = htons(id++); } iph->tot_len = htons(skb->len - skb->mac_len); iph->check = 0; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc829..c6287cd978c2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work) { unsigned long now, next, next_sec, next_sched; struct in_ifaddr *ifa; + struct hlist_node *n; int i; now = jiffies; next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); - rcu_read_lock(); for (i = 0; i < IN4_ADDR_HSIZE; i++) { + bool change_needed = false; + + rcu_read_lock(); hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { unsigned long age; @@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work) if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && age >= ifa->ifa_valid_lft) { - struct in_ifaddr **ifap ; - - rtnl_lock(); - for (ifap = &ifa->ifa_dev->ifa_list; - *ifap != NULL; ifap = &ifa->ifa_next) { - if (*ifap == ifa) - inet_del_ifa(ifa->ifa_dev, - ifap, 1); - } - rtnl_unlock(); + change_needed = true; } else if (ifa->ifa_preferred_lft == INFINITY_LIFE_TIME) { continue; @@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work) next = ifa->ifa_tstamp + ifa->ifa_valid_lft * HZ; - if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { - ifa->ifa_flags |= IFA_F_DEPRECATED; - rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); - } + if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) + change_needed = true; } else if (time_before(ifa->ifa_tstamp + ifa->ifa_preferred_lft * HZ, next)) { @@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work) ifa->ifa_preferred_lft * HZ; } } + rcu_read_unlock(); + if (!change_needed) + continue; + rtnl_lock(); + hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { + unsigned long 
age; + + if (ifa->ifa_flags & IFA_F_PERMANENT) + continue; + + /* We try to batch several events at once. */ + age = (now - ifa->ifa_tstamp + + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + + if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && + age >= ifa->ifa_valid_lft) { + struct in_ifaddr **ifap; + + for (ifap = &ifa->ifa_dev->ifa_list; + *ifap != NULL; ifap = &(*ifap)->ifa_next) { + if (*ifap == ifa) { + inet_del_ifa(ifa->ifa_dev, + ifap, 1); + break; + } + } + } else if (ifa->ifa_preferred_lft != + INFINITY_LIFE_TIME && + age >= ifa->ifa_preferred_lft && + !(ifa->ifa_flags & IFA_F_DEPRECATED)) { + ifa->ifa_flags |= IFA_F_DEPRECATED; + rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); + } + } + rtnl_unlock(); } - rcu_read_unlock(); next_sec = round_jiffies_up(next); next_sched = next; @@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg if (nlh->nlmsg_flags & NLM_F_EXCL || !(nlh->nlmsg_flags & NLM_F_REPLACE)) return -EEXIST; - - set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); + ifa = ifa_existing; + set_ifa_lifetime(ifa, valid_lft, prefered_lft); + cancel_delayed_work(&check_lifetime_work); + schedule_delayed_work(&check_lifetime_work, 0); + rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); + blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); } return 0; } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3b4f0cd2e63e..4cfe34d4cc96 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) /* skb is pure payload to encrypt */ - err = -ENOMEM; - esp = x->data; aead = esp->aead; alen = crypto_aead_authsize(aead); @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); - if (!tmp) + if (!tmp) { + err = -ENOMEM; goto error; + } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df3..786d97aee751 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock); * tcp/dccp_create_openreq_child(). */ void inet_csk_prepare_forced_close(struct sock *sk) + __releases(&sk->sk_lock.slock) { /* sk_clone_lock locked the socket and set refcnt to 2 */ bh_unlock_sock(sk); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 245ae078a07f..f4fd23de9b13 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -21,6 +21,7 @@ #include <linux/rtnetlink.h> #include <linux/slab.h> +#include <net/sock.h> #include <net/inet_frag.h> static void inet_frag_secret_rebuild(unsigned long dummy) @@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, __releases(&f->lock) { struct inet_frag_queue *q; + int depth = 0; hlist_for_each_entry(q, &f->hash[hash], list) { if (q->net == nf && f->match(q, key)) { @@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, read_unlock(&f->lock); return q; } + depth++; } read_unlock(&f->lock); - return inet_frag_create(nf, f, key); + if (depth <= INETFRAGS_MAXDEPTH) + return inet_frag_create(nf, f, key); + else + return ERR_PTR(-ENOBUFS); } EXPORT_SYMBOL(inet_frag_find); + +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, + const char *prefix) +{ + static const char msg[] = "inet_frag_find: Fragment hash bucket" + " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) + ". 
Dropping fragment.\n"; + + if (PTR_ERR(q) == -ENOBUFS) + LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); +} +EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6d30acb600c..52c273ea05c3 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg) if (!head->dev) goto out_rcu_unlock; - /* skb dst is stale, drop it, and perform route lookup again */ - skb_dst_drop(head); + /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); @@ -292,14 +291,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); - if (q == NULL) - goto out_nomem; - + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); + return NULL; + } return container_of(q, struct ipq, q); - -out_nomem: - LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); - return NULL; } /* Is the fragment too far ahead to be part of ipq? */ @@ -526,9 +522,16 @@ found: qp->q.max_size = skb->len + ihl; if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - qp->q.meat == qp->q.len) - return ip_frag_reasm(qp, prev, dev); + qp->q.meat == qp->q.len) { + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + err = ip_frag_reasm(qp, prev, dev); + skb->_skb_refdst = orefdst; + return err; + } + skb_dst_drop(skb); inet_frag_lru_move(&qp->q); return -EINPROGRESS; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec5..91d66dbde9c0 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if (dev->header_ops && dev->type == ARPHRD_IPGRE) { gre_hlen = 0; - if (skb->protocol == htons(ETH_P_IP)) - tiph = (const struct iphdr *)skb->data; - else - tiph = &tunnel->parms.iph; + tiph = (const struct iphdr *)skb->data; } else { gre_hlen = tunnel->hlen; tiph = &tunnel->parms.iph; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f332..ec7264514a82 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -370,7 +370,6 @@ int ip_options_compile(struct net *net, } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: - opt->ts = optptr - iph; if (skb) timeptr = &optptr[optptr[2]-1]; opt->ts_needtime = 1; @@ -381,7 +380,6 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } - opt->ts = optptr - iph; if (rt) { spec_dst_fill(&spec_dst, skb); memcpy(&optptr[optptr[2]-1], &spec_dst, 4); @@ -396,7 +394,6 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } - opt->ts = optptr - iph; { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); @@ -423,18 +420,18 @@ int ip_options_compile(struct net *net, put_unaligned_be32(midtime, timeptr); opt->is_changed = 1; } - } else { + } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; } - opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } + opt->ts = optptr - iph; break; case IPOPT_RA: if (optlen < 4) { diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 98cbc6877019..bf6c5cf31aed 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1522,7 +1522,8 @@ static 
int __init ip_auto_config(void) } for (i++; i < CONF_NAMESERVERS_MAX; i++) if (ic_nameservers[i] != NONE) - pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); + pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]); + pr_cont("\n"); #endif /* !SILENT */ return 0; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index ce2d43e1f09f..0d755c50994b 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT If unsure, say Y. -config IP_NF_QUEUE - tristate "IP Userspace queueing via NETLINK (OBSOLETE)" - depends on NETFILTER_ADVANCED - help - Netfilter has the ability to queue packets to user space: the - netlink device can be used to access them using this driver. - - This option enables the old IPv4-only "ip_queue" implementation - which has been obsoleted by the new "nfnetlink_queue" code (see - CONFIG_NETFILTER_NETLINK_QUEUE). - - To compile it as a module, choose M here. If unsure, say N. - config IP_NF_IPTABLES tristate "IP tables support (required for filtering/masq/NAT)" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c30130062cd6..c49dcd0284a0 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4, return dev_match; } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ + const struct rtable *rt = skb_rtable(skb); + return rt && (rt->rt_flags & RTCF_LOCAL); +} + static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rpfilter_info *info; @@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) info = par->matchinfo; invert = info->flags & XT_RPFILTER_INVERT; - if (par->in->flags & IFF_LOOPBACK) + if (rpfilter_is_local(skb)) return true ^ invert; iph = ip_hdr(skb); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11c..397e0f69435f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, * hasn't changed since we received the original syn, but I see * no easy way to do this. */ - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), - RT_SCOPE_UNIVERSE, IPPROTO_TCP, + flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, + RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae24..e22020790709 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) * Make sure that we have exactly size bytes * available to the caller, no more, no less. 
*/ - skb->avail_size = size; + skb->reserved_tailroom = skb->end - skb->tail - size; return skb; } __kfree_skb(skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0d9bdacce99f..13b9c08fc158 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2; #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -2059,11 +2060,8 @@ void tcp_enter_loss(struct sock *sk, int how) if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); - if (!how) { - /* Push undo marker, if it was plain RTO and nothing - * was retransmitted. */ - tp->undo_marker = tp->snd_una; - } else { + tp->undo_marker = tp->snd_una; + if (how) { tp->sacked_out = 0; tp->fackets_out = 0; } @@ -3567,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk) } } +static void tcp_store_ts_recent(struct tcp_sock *tp) +{ + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; + tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { + /* PAWS bug workaround wrt. ACK frames, the PAWS discard + * extra check below makes sure this can only happen + * for pure ACK frames. -DaveM + * + * Not only, also it occurs for expired timestamps. + */ + + if (tcp_paws_check(&tp->rx_opt, 0)) + tcp_store_ts_recent(tp); + } +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@ -3610,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + if (flag & FLAG_UPDATE_TS_RECENT) + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. @@ -3930,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif -static inline void tcp_store_ts_recent(struct tcp_sock *tp) -{ - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { - /* PAWS bug workaround wrt. ACK frames, the PAWS discard - * extra check below makes sure this can only happen - * for pure ACK frames. -DaveM - * - * Not only, also it occurs for expired timestamps. - */ - - if (tcp_paws_check(&tp->rx_opt, 0)) - tcp_store_ts_recent(tp); - } -} - /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) @@ -5546,14 +5550,9 @@ slow_path: return 0; step5: - if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) + if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard; - /* ts_recent update must be made after we are sure that the packet - * is in window. 
- */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -5989,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* step 5: check the ACK field */ if (true) { - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | + FLAG_UPDATE_TS_RECENT) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: @@ -6140,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } } - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..d09203c63264 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk) struct inet_sock *inet = inet_sk(sk); u32 mtu = tcp_sk(sk)->mtu_info; - /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs - * send out by Linux are always <576bytes so they should go through - * unfragmented). - */ - if (sk->sk_state == TCP_LISTEN) - return; - dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ + /* We are not interested in TCP_LISTEN and open_requests + * (SYN-ACKs send out by Linux are always <576bytes so + * they should go through unfragmented). + */ + if (sk->sk_state == TCP_LISTEN) + goto out; + tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074da..509912a5ff98 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); - skb->avail_size -= eat; len -= eat; if (!len) return; @@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) goto send_now; } - /* Ok, it looks like it is advisable to defer. */ - tp->tso_deferred = 1 | (jiffies << 1); + /* Ok, it looks like it is advisable to defer. + * Do not rearm the timer if already set to not break TCP ACK clocking. + */ + if (!tp->tso_deferred) + tp->tso_deferred = 1 | (jiffies << 1); return true; @@ -2386,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) */ TCP_SKB_CB(skb)->when = tcp_time_stamp; - /* make sure skb->data is aligned on arches that require it */ - if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { + /* make sure skb->data is aligned on arches that require it + * and check if ack-trimming & collapsing extended the headroom + * beyond what csum_start can cover. + */ + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || + skb_headroom(skb) >= 0xFFFF)) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); return nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : @@ -2707,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, skb_reserve(skb, MAX_TCP_HEADER); skb_dst_set(skb, dst); + security_skb_owned_by(skb, sk); mss = dst_metric_advmss(dst); if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963c..0a073a263720 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb) void udp_destroy_sock(struct sock *sk) { + struct udp_sock *up = udp_sk(sk); bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); unlock_sock_fast(sk, slow); + if (static_key_false(&udp_encap_needed) && up->encap_type) { + void (*encap_destroy)(struct sock *sk); + encap_destroy = ACCESS_ONCE(up->encap_destroy); + if (encap_destroy) + encap_destroy(sk); + } } /* diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f2c7e615f902..dae802c0af7c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, struct net_device *dev); -static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); - static struct ipv6_devconf ipv6_devconf __read_mostly = { .forwarding = 0, .hop_limit = IPV6_DEFAULT_HOPLIMIT, @@ -837,7 +835,7 @@ out2: rcu_read_unlock_bh(); if (likely(err == 0)) - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); + inet6addr_notifier_call_chain(NETDEV_UP, ifa); else { kfree(ifa); ifa = ERR_PTR(err); @@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) ipv6_ifa_notify(RTM_DELADDR, ifp); - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); + inet6addr_notifier_call_chain(NETDEV_DOWN, ifp); /* * Purge or update corresponding prefix @@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) static void init_loopback(struct net_device *dev) { struct inet6_dev *idev; + struct net_device *sp_dev; + struct inet6_ifaddr *sp_ifa; + struct rt6_info *sp_rt; /* ::1 */ @@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev) } add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + + /* Add routes to other interface's IPv6 addresses */ + for_each_netdev(dev_net(dev), sp_dev) { + if (!strcmp(sp_dev->name, dev->name)) + continue; + + idev = __in6_dev_get(sp_dev); + if (!idev) + continue; + + read_lock_bh(&idev->lock); + list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { + + if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) + continue; + + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); + + /* Failure cases are ignored */ + if (!IS_ERR(sp_rt)) + ip6_ins_rt(sp_rt); + } + read_unlock_bh(&idev->lock); + } } static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) @@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) if (state != INET6_IFADDR_STATE_DEAD) { __ipv6_ifa_notify(RTM_DELADDR, ifa); - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); + inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); } in6_ifa_put(ifa); @@ -4784,26 +4809,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev) static int __net_init addrconf_init_net(struct net *net) { - int err; + int err = -ENOMEM; struct ipv6_devconf *all, *dflt; - err = -ENOMEM; - all = &ipv6_devconf; - dflt = &ipv6_devconf_dflt; + all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); + if (all == NULL) + goto err_alloc_all; - if 
(!net_eq(net, &init_net)) { - all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); - if (all == NULL) - goto err_alloc_all; + dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); + if (dflt == NULL) + goto err_alloc_dflt; - dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); - if (dflt == NULL) - goto err_alloc_dflt; - } else { - /* these will be inherited by all namespaces */ - dflt->autoconf = ipv6_defaults.autoconf; - dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; - } + /* these will be inherited by all namespaces */ + dflt->autoconf = ipv6_defaults.autoconf; + dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; net->ipv6.devconf_all = all; net->ipv6.devconf_dflt = dflt; @@ -4848,22 +4867,6 @@ static struct pernet_operations addrconf_ops = { .exit = addrconf_exit_net, }; -/* - * Device notifier - */ - -int register_inet6addr_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(register_inet6addr_notifier); - -int unregister_inet6addr_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(unregister_inet6addr_notifier); - static struct rtnl_af_ops inet6_ops = { .family = AF_INET6, .fill_link_af = inet6_fill_link_af, diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d051e5f4bf34..72104562c864 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr) } EXPORT_SYMBOL(__ipv6_addr_type); +static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); + +int register_inet6addr_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(register_inet6addr_notifier); + +int unregister_inet6addr_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(unregister_inet6addr_notifier); + +int inet6addr_notifier_call_chain(unsigned long val, void *v) +{ + return atomic_notifier_call_chain(&inet6addr_chain, val, v); +} +EXPORT_SYMBOL(inet6addr_notifier_call_chain); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b1876e52091e..2bab2aa59745 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt ipv6_addr_loopback(&hdr->daddr)) goto err; + /* RFC4291 Errata ID: 3480 + * Interface-Local scope spans only a single interface on a + * node and is useful only for loopback transmission of + * multicast. Packets with interface-local scope received + * from another node must be discarded. 
+ */ + if (!(skb->pkt_type == PACKET_LOOPBACK || + dev->flags & IFF_LOOPBACK) && + ipv6_addr_is_multicast(&hdr->daddr) && + IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) + goto err; + /* RFC4291 2.7 * Nodes must not originate a packet to a multicast address whose scope * field contains the reserved value 0; if such a packet is received, it @@ -281,7 +293,8 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && - !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && + !(ipv6_addr_type(&hdr->daddr) & + (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 83acc1405a18..cb631143721c 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, if (pfx_len - i >= 32) mask = 0; else - mask = htonl(~((1 << (pfx_len - i)) - 1)); + mask = htonl((1 << (i - pfx_len + 32)) - 1); idx = i / 32; addr->s6_addr32[idx] &= mask; @@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par) static struct xt_target ip6t_npt_target_reg[] __read_mostly = { { .name = "SNPT", + .table = "mangle", .target = ip6t_snpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), .checkentry = ip6t_npt_checkentry, @@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { }, { .name = "DNPT", + .table = "mangle", .target = ip6t_dnpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), .checkentry = ip6t_npt_checkentry, diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5060d54199ab..e0983f3648a6 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, return ret; } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ + const struct rt6_info *rt = (const void *) skb_dst(skb); + return rt && (rt->rt6i_flags & RTF_LOCAL); +} + static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rpfilter_info *info = par->matchinfo; @@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) struct ipv6hdr *iph; bool invert = info->flags & XT_RPFILTER_INVERT; - if (par->in->flags & IFF_LOOPBACK) + if (rpfilter_is_local(skb)) return true ^ invert; iph = ipv6_hdr(skb); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 54087e96d7b8..6700069949dd 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -14,6 +14,8 @@ * 2 of the License, or (at your option) any later version. 
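The corrected mask expression in the ip6t_NPT hunk above reads more naturally with r = pfx_len - i, the number of prefix bits that fall into the current 32-bit address word: for 0 < r < 32 the bits to preserve are the low 32 - r host-suffix bits, and htonl() is needed because the s6_addr32[] words are stored in network byte order (the r >= 32 case is handled separately with mask = 0). A sketch of just that arithmetic, under those assumptions and with illustrative naming:

#include <stdint.h>
#include <arpa/inet.h>

/* Mask covering the host-suffix bits of one 32-bit address word when
 * r prefix bits (0 < r < 32) land in that word.  Illustrative only.
 */
static uint32_t host_suffix_mask(unsigned int r)
{
    return htonl((UINT32_C(1) << (32 - r)) - 1);
}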
*/ +#define pr_fmt(fmt) "IPv6-nf: " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> @@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); local_bh_enable(); - if (q == NULL) - goto oom; - + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); + return NULL; + } return container_of(q, struct frag_queue, q); - -oom: - return NULL; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c6a77290c6e..0ba10e53a629 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -26,6 +26,9 @@ * YOSHIFUJI,H. @USAGI Always remove fragment header to * calculate ICV correctly. */ + +#define pr_fmt(fmt) "IPv6: " fmt + #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> @@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); - if (q == NULL) + if (IS_ERR_OR_NULL(q)) { + inet_frag_maybe_warn_overflow(q, pr_fmt()); return NULL; - + } return container_of(q, struct frag_queue, q); } @@ -326,9 +330,17 @@ found: } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - fq->q.meat == fq->q.len) - return ip6_frag_reasm(fq, prev, dev); + fq->q.meat == fq->q.len) { + int res; + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + res = ip6_frag_reasm(fq, prev, dev); + skb->_skb_refdst = orefdst; + return res; + } + skb_dst_drop(skb); inet_frag_lru_move(&fq->q); return -1; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df5..46a5be85be87 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -386,9 +386,17 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (dst) dst->ops->redirect(dst, sk, skb); + goto out; } if (type == ICMPV6_PKT_TOOBIG) { + /* We are not interested in TCP_LISTEN and open_requests + * (SYN-ACKs send out by Linux are always <576bytes so + * they should go through unfragmented). 
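Both fq_find() hunks in this area switch from a plain NULL test to IS_ERR_OR_NULL() because the fragment-queue lookup can now also report an error encoded in the pointer value. A simplified userspace model of that idiom (the kernel's own helpers live in include/linux/err.h; the constant and casts below mirror them but are stand-ins here):

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }

/* Pointers in the top MAX_ERRNO values of the address space are
 * treated as encoded negative errno values, not valid objects.
 */
static inline bool IS_ERR(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
    return !ptr || IS_ERR(ptr);
}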
+ */ + if (sk->sk_state == TCP_LISTEN) + goto out; + tp->mtu_info = ntohl(info); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 599e1ba6d1ce..d8e5e852fc7a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1285,10 +1285,18 @@ do_confirm: void udpv6_destroy_sock(struct sock *sk) { + struct udp_sock *up = udp_sk(sk); lock_sock(sk); udp_v6_flush_pending_frames(sk); release_sock(sk); + if (static_key_false(&udpv6_encap_needed) && up->encap_type) { + void (*encap_destroy)(struct sock *sk); + encap_destroy = ACCESS_ONCE(up->encap_destroy); + if (encap_destroy) + encap_destroy(sk); + } + inet6_destroy_sock(sk); } diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d07e3a626446..e493b3397ae3 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) @@ -2583,8 +2585,10 @@ bed: NULL, NULL, NULL); /* Check if the we got some results */ - if (!self->cachedaddr) - return -EAGAIN; /* Didn't find any devices */ + if (!self->cachedaddr) { + err = -EAGAIN; /* Didn't find any devices */ + goto out; + } daddr = self->cachedaddr; /* Cleanup */ self->cachedaddr = 0; diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e530..362ba47968e4 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, struct tty_port *port = &self->port; DECLARE_WAITQUEUE(wait, current); int retval; - int do_clocal = 0, extra_count = 0; + int do_clocal = 0; unsigned long flags; IRDA_DEBUG(2, "%s()\n", __func__ ); @@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. 
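The udp_destroy_sock()/udpv6_destroy_sock() hunks in this series add an optional teardown callback for encapsulation users. The shape of that pattern, as a plain-C sketch (types and names are stand-ins; the kernel version additionally gates the check behind a static key and reads the pointer with ACCESS_ONCE()):

struct tunnel_sock {
    int encap_type;
    void (*encap_destroy)(struct tunnel_sock *sk);
};

/* Read the optional hook once into a local, then test and call that
 * local copy, so a concurrent writer cannot change it in between.
 */
static void tunnel_sock_destroy(struct tunnel_sock *sk)
{
    void (*destroy)(struct tunnel_sock *sk);

    if (!sk->encap_type)
        return;

    destroy = sk->encap_destroy;
    if (destroy)
        destroy(sk);
}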
*/ - if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ - /* nonblock mode is set or port is not enabled */ + if (test_bit(TTY_IO_ERROR, &tty->flags)) { + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (filp->f_flags & O_NONBLOCK) { + /* nonblock mode is set */ + if (tty->termios.c_cflag & CBAUD) + tty_port_raise_dtr_rts(port); port->flags |= ASYNC_NORMAL_ACTIVE; IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); return 0; @@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __FILE__, __LINE__, tty->driver->name, port->count); spin_lock_irqsave(&port->lock, flags); - if (!tty_hung_up_p(filp)) { - extra_count = 1; + if (!tty_hung_up_p(filp)) port->count--; - } - spin_unlock_irqrestore(&port->lock, flags); port->blocked_open++; + spin_unlock_irqrestore(&port->lock, flags); while (1) { if (tty->termios.c_cflag & CBAUD) tty_port_raise_dtr_rts(port); - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !test_bit(ASYNCB_INITIALIZED, &port->flags)) { @@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); - if (extra_count) { - /* ++ is not atomic, so this should be protected - Jean II */ - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) port->count++; - spin_unlock_irqrestore(&port->lock, flags); - } port->blocked_open--; + spin_unlock_irqrestore(&port->lock, flags); IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", __FILE__, __LINE__, tty->driver->name, port->count); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 29340a9a6fb9..e1b37f5a2691 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap, { struct iriap_cb *self; - IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); + IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, + irlmp_reason_str(reason), reason); self = instance; diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 6115a44c0a24..1064621da6f6 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -66,8 +66,15 @@ const char *irlmp_reasons[] = { "LM_LAP_RESET", "LM_INIT_DISCONNECT", "ERROR, NOT USED", + "UNKNOWN", }; +const char *irlmp_reason_str(LM_REASON reason) +{ + reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1); + return irlmp_reasons[reason]; +} + /* * Function irlmp_init (void) * @@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, { struct lsap_cb *lsap; - IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); + IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, + irlmp_reason_str(reason), reason); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe4284..206ce6db2c36 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] = #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) -/* macros to set/get socket control buffer at correct offset */ -#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ -#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) -#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ -#define CB_TRGCLS_LEN (TRGCLS_SIZE) - #define __iucv_sock_wait(sk, condition, timeo, 
ret) \ do { \ DEFINE_WAIT(__wait); \ @@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, /* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; - memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); + IUCV_SKB_CB(skb)->tag = txmsg.tag; if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_sent); @@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) return -ENOMEM; /* copy target class to control buffer of new skb */ - memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); + IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; /* copy data fragment */ memcpy(nskb->data, skb->data + copied, size); @@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, /* store msg target class in the second 4 bytes of skb ctrl buffer */ /* Note: the first 4 bytes are reserved for msg tag */ - memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); + IUCV_SKB_CB(skb)->class = msg->class; /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ if ((msg->flags & IUCV_IPRMDATA) && len > 7) { @@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, } } + IUCV_SKB_CB(skb)->offset = 0; if (sock_queue_rcv_skb(sk, skb)) skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); } @@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, unsigned int copied, rlen; struct sk_buff *skb, *rskb, *cskb; int err = 0; + u32 offset; + + msg->msg_namelen = 0; if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && @@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } - rlen = skb->len; /* real length of skb */ + offset = IUCV_SKB_CB(skb)->offset; + rlen = skb->len - offset; /* real length of skb */ copied = min_t(unsigned int, rlen, len); if (!rlen) sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; cskb = skb; - if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { + if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; @@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, * get the trgcls from the control buffer of the skb due to * fragmentation of original iucv message. 
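The af_iucv changes here replace memcpy() at hand-computed offsets into skb->cb with a typed overlay accessed through IUCV_SKB_CB(). A standalone sketch of that control-buffer pattern (the 48-byte buffer size and the tag/class/offset field names mirror the patch; everything else is illustrative):

#include <stdint.h>
#include <assert.h>

#define SKB_CB_SIZE 48                 /* models skb->cb[] */

struct fake_skb { char cb[SKB_CB_SIZE]; };

struct iucv_skb_cb {
    uint32_t class;    /* target class of the IUCV message */
    uint32_t tag;      /* tag used to match transmit completions */
    uint32_t offset;   /* bytes already handed to the receiver */
};

static_assert(sizeof(struct iucv_skb_cb) <= SKB_CB_SIZE,
              "control-buffer overlay must fit in cb[]");

#define IUCV_SKB_CB(skb) ((struct iucv_skb_cb *)&((skb)->cb[0]))

static void mark_unread(struct fake_skb *skb)
{
    IUCV_SKB_CB(skb)->offset = 0;      /* nothing consumed yet */
}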
*/ err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, - CB_TRGCLS_LEN, CB_TRGCLS(skb)); + sizeof(IUCV_SKB_CB(skb)->class), + (void *)&IUCV_SKB_CB(skb)->class); if (err) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); @@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, /* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM) { - skb_pull(skb, copied); - if (skb->len) { - skb_queue_head(&sk->sk_receive_queue, skb); + if (copied < rlen) { + IUCV_SKB_CB(skb)->offset = offset + copied; goto done; } } @@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, spin_lock_bh(&iucv->message_q.lock); rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { + IUCV_SKB_CB(rskb)->offset = 0; if (sock_queue_rcv_skb(sk, rskb)) { skb_queue_head(&iucv->backlog_skb_q, rskb); @@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path, spin_lock_irqsave(&list->lock, flags); while (list_skb != (struct sk_buff *)list) { - if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { + if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { this = list_skb; break; } @@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); skb_reset_transport_header(skb); skb_reset_network_header(skb); + IUCV_SKB_CB(skb)->offset = 0; spin_lock(&iucv->message_q.lock); if (skb_queue_empty(&iucv->backlog_skb_q)) { if (sock_queue_rcv_skb(sk, skb)) { @@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, /* fall through and receive zero length data */ case 0: /* plain data frame */ - memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, - CB_TRGCLS_LEN); + IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; err = afiucv_hs_callback_rx(sk, skb); break; default: diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1ea..5b1e5af25713 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); xp->priority = pol->sadb_x_policy_priority; - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); if (!xp->family) { err = -EINVAL; @@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ if (xp->selector.sport) xp->selector.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); xp->selector.prefixlen_d = sa->sadb_address_prefixlen; @@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa memset(&sel, 0, sizeof(sel)); - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); sel.prefixlen_s = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa if (sel.sport) sel.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); sel.prefixlen_d = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2693,6 +2693,7 @@ static int 
key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d36875f3427e..8aecf5df6656 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -114,7 +114,6 @@ struct l2tp_net { static void l2tp_session_set_header_len(struct l2tp_session *session, int version); static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); -static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); static inline struct l2tp_net *l2tp_pernet(struct net *net) { @@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) } else { /* Socket is owned by kernelspace */ sk = tunnel->sock; + sock_hold(sk); } out: @@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk) } sock_put(sk); } + sock_put(sk); } EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); @@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk struct sk_buff *skbp; struct sk_buff *tmp; u32 ns = L2TP_SKB_CB(skb)->ns; - struct l2tp_stats *sstats; spin_lock_bh(&session->reorder_q.lock); - sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { if (L2TP_SKB_CB(skbp)->ns > ns) { __skb_queue_before(&session->reorder_q, skbp, skb); @@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", session->name, ns, L2TP_SKB_CB(skbp)->ns, skb_queue_len(&session->reorder_q)); - u64_stats_update_begin(&sstats->syncp); - sstats->rx_oos_packets++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_oos_packets); goto out; } } @@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * { struct l2tp_tunnel *tunnel = session->tunnel; int length = L2TP_SKB_CB(skb)->length; - struct l2tp_stats *tstats, *sstats; /* We're about to requeue the skb, so return resources * to its current owner (a socket receive buffer). 
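The l2tp hunks from here on replace u64 counters protected by u64_stats_sync with atomic_long_t counters, so each update site shrinks from a begin/end-bracketed section to a single atomic increment, at the cost of the counters being only long-sized on 32-bit builds. A C11 sketch of the resulting update-side pattern, with stand-in names rather than the l2tp structures:

#include <stdatomic.h>

struct tunnel_stats {
    atomic_long rx_packets;
    atomic_long rx_bytes;
    atomic_long rx_errors;
};

/* One relaxed atomic add per counter; no surrounding lock or sequence
 * counter is needed on the update path.
 */
static void stats_rx(struct tunnel_stats *st, long bytes)
{
    atomic_fetch_add_explicit(&st->rx_packets, 1, memory_order_relaxed);
    atomic_fetch_add_explicit(&st->rx_bytes, bytes, memory_order_relaxed);
}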
*/ skb_orphan(skb); - tstats = &tunnel->stats; - u64_stats_update_begin(&tstats->syncp); - sstats = &session->stats; - u64_stats_update_begin(&sstats->syncp); - tstats->rx_packets++; - tstats->rx_bytes += length; - sstats->rx_packets++; - sstats->rx_bytes += length; - u64_stats_update_end(&tstats->syncp); - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&tunnel->stats.rx_packets); + atomic_long_add(length, &tunnel->stats.rx_bytes); + atomic_long_inc(&session->stats.rx_packets); + atomic_long_add(length, &session->stats.rx_bytes); if (L2TP_SKB_CB(skb)->has_seq) { /* Bump our Nr */ @@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) { struct sk_buff *skb; struct sk_buff *tmp; - struct l2tp_stats *sstats; /* If the pkt at the head of the queue has the nr that we * expect to send up next, dequeue it and any other @@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) */ start: spin_lock_bh(&session->reorder_q.lock); - sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skb, tmp) { if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { - u64_stats_update_begin(&sstats->syncp); - sstats->rx_seq_discards++; - sstats->rx_errors++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_seq_discards); + atomic_long_inc(&session->stats.rx_errors); l2tp_dbg(session, L2TP_MSG_SEQ, "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", session->name, L2TP_SKB_CB(skb)->ns, @@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; int offset; u32 ns, nr; - struct l2tp_stats *sstats = &session->stats; /* The ref count is increased since we now hold a pointer to * the session. Take care to decrement the refcnt when exiting @@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, "%s: cookie mismatch (%u/%u). Discarding.\n", tunnel->name, tunnel->tunnel_id, session->session_id); - u64_stats_update_begin(&sstats->syncp); - sstats->rx_cookie_discards++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_cookie_discards); goto discard; } ptr += session->peer_cookie_len; @@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, l2tp_warn(session, L2TP_MSG_SEQ, "%s: recv data has no seq numbers when required. Discarding.\n", session->name); - u64_stats_update_begin(&sstats->syncp); - sstats->rx_seq_discards++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_seq_discards); goto discard; } @@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, l2tp_warn(session, L2TP_MSG_SEQ, "%s: recv data has no seq numbers when required. 
Discarding.\n", session->name); - u64_stats_update_begin(&sstats->syncp); - sstats->rx_seq_discards++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_seq_discards); goto discard; } } @@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * packets */ if (L2TP_SKB_CB(skb)->ns != session->nr) { - u64_stats_update_begin(&sstats->syncp); - sstats->rx_seq_discards++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_seq_discards); l2tp_dbg(session, L2TP_MSG_SEQ, "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", session->name, L2TP_SKB_CB(skb)->ns, @@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, return; discard: - u64_stats_update_begin(&sstats->syncp); - sstats->rx_errors++; - u64_stats_update_end(&sstats->syncp); + atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); if (session->deref) @@ -828,6 +803,23 @@ discard: } EXPORT_SYMBOL(l2tp_recv_common); +/* Drop skbs from the session's reorder_q + */ +int l2tp_session_queue_purge(struct l2tp_session *session) +{ + struct sk_buff *skb = NULL; + BUG_ON(!session); + BUG_ON(session->magic != L2TP_SESSION_MAGIC); + while ((skb = skb_dequeue(&session->reorder_q))) { + atomic_long_inc(&session->stats.rx_errors); + kfree_skb(skb); + if (session->deref) + (*session->deref)(session); + } + return 0; +} +EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); + /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame * here. The skb is not on a list when we get here. * Returns 0 if the packet was a data packet and was successfully passed on. @@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, u32 tunnel_id, session_id; u16 version; int length; - struct l2tp_stats *tstats; if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) goto discard_bad_csum; @@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, discard_bad_csum: LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); - tstats = &tunnel->stats; - u64_stats_update_begin(&tstats->syncp); - tstats->rx_errors++; - u64_stats_update_end(&tstats->syncp); + atomic_long_inc(&tunnel->stats.rx_errors); kfree_skb(skb); return 0; @@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; unsigned int len = skb->len; int error; - struct l2tp_stats *tstats, *sstats; /* Debug */ if (session->send_seq) @@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, error = ip_queue_xmit(skb, fl); /* Update stats */ - tstats = &tunnel->stats; - u64_stats_update_begin(&tstats->syncp); - sstats = &session->stats; - u64_stats_update_begin(&sstats->syncp); if (error >= 0) { - tstats->tx_packets++; - tstats->tx_bytes += len; - sstats->tx_packets++; - sstats->tx_bytes += len; + atomic_long_inc(&tunnel->stats.tx_packets); + atomic_long_add(len, &tunnel->stats.tx_bytes); + atomic_long_inc(&session->stats.tx_packets); + atomic_long_add(len, &session->stats.tx_bytes); } else { - tstats->tx_errors++; - sstats->tx_errors++; + atomic_long_inc(&tunnel->stats.tx_errors); + atomic_long_inc(&session->stats.tx_errors); } - u64_stats_update_end(&tstats->syncp); - u64_stats_update_end(&sstats->syncp); return 0; } @@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock 
*sk) /* No longer an encapsulation socket. See net/ipv4/udp.c */ (udp_sk(sk))->encap_type = 0; (udp_sk(sk))->encap_rcv = NULL; + (udp_sk(sk))->encap_destroy = NULL; break; case L2TP_ENCAPTYPE_IP: break; @@ -1311,7 +1293,7 @@ end: /* When the tunnel is closed, all the attached sessions need to go too. */ -static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) +void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { int hash; struct hlist_node *walk; @@ -1334,25 +1316,13 @@ again: hlist_del_init(&session->hlist); - /* Since we should hold the sock lock while - * doing any unbinding, we need to release the - * lock we're holding before taking that lock. - * Hold a reference to the sock so it doesn't - * disappear as we're jumping between locks. - */ if (session->ref != NULL) (*session->ref)(session); write_unlock_bh(&tunnel->hlist_lock); - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_del_init_rcu(&session->global_hlist); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - synchronize_rcu(); - } + __l2tp_session_unhash(session); + l2tp_session_queue_purge(session); if (session->session_close != NULL) (*session->session_close)(session); @@ -1360,6 +1330,8 @@ again: if (session->deref != NULL) (*session->deref)(session); + l2tp_session_dec_refcount(session); + write_lock_bh(&tunnel->hlist_lock); /* Now restart from the beginning of this hash @@ -1372,6 +1344,17 @@ again: } write_unlock_bh(&tunnel->hlist_lock); } +EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); + +/* Tunnel socket destroy hook for UDP encapsulation */ +static void l2tp_udp_encap_destroy(struct sock *sk) +{ + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); + if (tunnel) { + l2tp_tunnel_closeall(tunnel); + sock_put(sk); + } +} /* Really kill the tunnel. * Come here only when all sessions have been cleared from the tunnel. @@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work) return; sock = sk->sk_socket; - BUG_ON(!sock); - /* If the tunnel socket was created directly by the kernel, use the - * sk_* API to release the socket now. Otherwise go through the - * inet_* layer to shut the socket down, and let userspace close it. + /* If the tunnel socket was created by userspace, then go through the + * inet layer to shut the socket down, and let userspace close it. + * Otherwise, if we created the socket directly within the kernel, use + * the sk API to release it here. * In either case the tunnel resources are freed in the socket * destructor when the tunnel socket goes away. */ - if (sock->file == NULL) { - kernel_sock_shutdown(sock, SHUT_RDWR); - sk_release_kernel(sk); + if (tunnel->fd >= 0) { + if (sock) + inet_shutdown(sock, 2); } else { - inet_shutdown(sock, 2); + if (sock) + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sk); } l2tp_tunnel_sock_put(sk); @@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. 
See net/ipv4/udp.c */ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; + udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET6) udpv6_encap_enable(); @@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { + l2tp_tunnel_closeall(tunnel); return (false == queue_work(l2tp_wq, &tunnel->del_work)); } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); @@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); */ void l2tp_session_free(struct l2tp_session *session) { - struct l2tp_tunnel *tunnel; + struct l2tp_tunnel *tunnel = session->tunnel; BUG_ON(atomic_read(&session->ref_count) != 0); - tunnel = session->tunnel; - if (tunnel != NULL) { + if (tunnel) { BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); + if (session->session_id != 0) + atomic_dec(&l2tp_session_count); + sock_put(tunnel->sock); + session->tunnel = NULL; + l2tp_tunnel_dec_refcount(tunnel); + } + + kfree(session); - /* Delete the session from the hash */ + return; +} +EXPORT_SYMBOL_GPL(l2tp_session_free); + +/* Remove an l2tp session from l2tp_core's hash lists. + * Provides a tidyup interface for pseudowire code which can't just route all + * shutdown via. l2tp_session_delete and a pseudowire-specific session_close + * callback. + */ +void __l2tp_session_unhash(struct l2tp_session *session) +{ + struct l2tp_tunnel *tunnel = session->tunnel; + + /* Remove the session from core hashes */ + if (tunnel) { + /* Remove from the per-tunnel hash */ write_lock_bh(&tunnel->hlist_lock); hlist_del_init(&session->hlist); write_unlock_bh(&tunnel->hlist_lock); - /* Unlink from the global hash if not L2TPv2 */ + /* For L2TPv3 we have a per-net hash: remove from there, too */ if (tunnel->version != L2TP_HDR_VER_2) { struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - spin_lock_bh(&pn->l2tp_session_hlist_lock); hlist_del_init_rcu(&session->global_hlist); spin_unlock_bh(&pn->l2tp_session_hlist_lock); synchronize_rcu(); } - - if (session->session_id != 0) - atomic_dec(&l2tp_session_count); - - sock_put(tunnel->sock); - - /* This will delete the tunnel context if this - * is the last session on the tunnel. - */ - session->tunnel = NULL; - l2tp_tunnel_dec_refcount(tunnel); } - - kfree(session); - - return; } -EXPORT_SYMBOL_GPL(l2tp_session_free); +EXPORT_SYMBOL_GPL(__l2tp_session_unhash); /* This function is used by the netlink SESSION_DELETE command and by pseudowire modules. */ int l2tp_session_delete(struct l2tp_session *session) { + if (session->ref) + (*session->ref)(session); + __l2tp_session_unhash(session); + l2tp_session_queue_purge(session); if (session->session_close != NULL) (*session->session_close)(session); - + if (session->deref) + (*session->ref)(session); l2tp_session_dec_refcount(session); - return 0; } EXPORT_SYMBOL_GPL(l2tp_session_delete); - /* We come here whenever a session's send_seq, cookie_len or * l2specific_len parameters are set. 
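The reworked l2tp_session_delete()/__l2tp_session_unhash() split above boils down to a teardown ordering: pin the session, remove it from every lookup structure, purge whatever is still queued against it, run the owner's close callback, then drop the references. A condensed stand-in sketch of that ordering (not the l2tp API; the stubs only mark where the real work happens):

struct session {
    int refcount;
    void (*ref)(struct session *s);
    void (*deref)(struct session *s);
    void (*close)(struct session *s);
};

static void session_unhash(struct session *s)      { (void)s; /* drop from hash lists */ }
static void session_queue_purge(struct session *s) { (void)s; /* free queued skbs */ }
static void session_put(struct session *s)         { s->refcount--; /* may free */ }

static void session_delete(struct session *s)
{
    if (s->ref)
        s->ref(s);              /* keep the session alive across teardown */
    session_unhash(s);
    session_queue_purge(s);
    if (s->close)
        s->close(s);
    if (s->deref)
        s->deref(s);
    session_put(s);
}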
*/ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8eb8f1d47f3a..485a490fd990 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -36,16 +36,15 @@ enum { struct sk_buff; struct l2tp_stats { - u64 tx_packets; - u64 tx_bytes; - u64 tx_errors; - u64 rx_packets; - u64 rx_bytes; - u64 rx_seq_discards; - u64 rx_oos_packets; - u64 rx_errors; - u64 rx_cookie_discards; - struct u64_stats_sync syncp; + atomic_long_t tx_packets; + atomic_long_t tx_bytes; + atomic_long_t tx_errors; + atomic_long_t rx_packets; + atomic_long_t rx_bytes; + atomic_long_t rx_seq_discards; + atomic_long_t rx_oos_packets; + atomic_long_t rx_errors; + atomic_long_t rx_cookie_discards; }; struct l2tp_tunnel; @@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); +extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); +extern void __l2tp_session_unhash(struct l2tp_session *session); extern int l2tp_session_delete(struct l2tp_session *session); extern void l2tp_session_free(struct l2tp_session *session); extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); +extern int l2tp_session_queue_purge(struct l2tp_session *session); extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index c3813bc84552..072d7202e182 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) tunnel->sock ? 
atomic_read(&tunnel->sock->sk_refcnt) : 0, atomic_read(&tunnel->ref_count)); - seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", + seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n", tunnel->debug, - (unsigned long long)tunnel->stats.tx_packets, - (unsigned long long)tunnel->stats.tx_bytes, - (unsigned long long)tunnel->stats.tx_errors, - (unsigned long long)tunnel->stats.rx_packets, - (unsigned long long)tunnel->stats.rx_bytes, - (unsigned long long)tunnel->stats.rx_errors); + atomic_long_read(&tunnel->stats.tx_packets), + atomic_long_read(&tunnel->stats.tx_bytes), + atomic_long_read(&tunnel->stats.tx_errors), + atomic_long_read(&tunnel->stats.rx_packets), + atomic_long_read(&tunnel->stats.rx_bytes), + atomic_long_read(&tunnel->stats.rx_errors)); if (tunnel->show != NULL) tunnel->show(m, tunnel); @@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) seq_printf(m, "\n"); } - seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", + seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n", session->nr, session->ns, - (unsigned long long)session->stats.tx_packets, - (unsigned long long)session->stats.tx_bytes, - (unsigned long long)session->stats.tx_errors, - (unsigned long long)session->stats.rx_packets, - (unsigned long long)session->stats.rx_bytes, - (unsigned long long)session->stats.rx_errors); + atomic_long_read(&session->stats.tx_packets), + atomic_long_read(&session->stats.tx_bytes), + atomic_long_read(&session->stats.tx_errors), + atomic_long_read(&session->stats.rx_packets), + atomic_long_read(&session->stats.rx_bytes), + atomic_long_read(&session->stats.rx_errors)); if (session->show != NULL) session->show(m, session); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 7f41b7051269..571db8dd2292 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout) static void l2tp_ip_destroy_sock(struct sock *sk) { struct sk_buff *skb; + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) kfree_skb(skb); + if (tunnel) { + l2tp_tunnel_closeall(tunnel); + sock_put(sk); + } + sk_refcnt_debug_dec(sk); } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 41f2f8126ebc..b8a6039314e8 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) static void l2tp_ip6_destroy_sock(struct sock *sk) { + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); + lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); + if (tunnel) { + l2tp_tunnel_closeall(tunnel); + sock_put(sk); + } + inet6_destroy_sock(sk); } @@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, lsa->l2tp_addr = ipv6_hdr(skb)->saddr; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; + lsa->l2tp_conn_id = 0; if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) lsa->l2tp_scope_id = IP6CB(skb)->iif; } diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index c1bab22db85e..0825ff26e113 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla #if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *np = NULL; #endif - struct l2tp_stats stats; - unsigned int start; hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); @@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct 
sk_buff *skb, u32 portid, u32 seq, int fla if (nest == NULL) goto nla_put_failure; - do { - start = u64_stats_fetch_begin(&tunnel->stats.syncp); - stats.tx_packets = tunnel->stats.tx_packets; - stats.tx_bytes = tunnel->stats.tx_bytes; - stats.tx_errors = tunnel->stats.tx_errors; - stats.rx_packets = tunnel->stats.rx_packets; - stats.rx_bytes = tunnel->stats.rx_bytes; - stats.rx_errors = tunnel->stats.rx_errors; - stats.rx_seq_discards = tunnel->stats.rx_seq_discards; - stats.rx_oos_packets = tunnel->stats.rx_oos_packets; - } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); - - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, + atomic_long_read(&tunnel->stats.tx_packets)) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, + atomic_long_read(&tunnel->stats.tx_bytes)) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, + atomic_long_read(&tunnel->stats.tx_errors)) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, + atomic_long_read(&tunnel->stats.rx_packets)) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, + atomic_long_read(&tunnel->stats.rx_bytes)) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - stats.rx_seq_discards) || + atomic_long_read(&tunnel->stats.rx_seq_discards)) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - stats.rx_oos_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) + atomic_long_read(&tunnel->stats.rx_oos_packets)) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, + atomic_long_read(&tunnel->stats.rx_errors))) goto nla_put_failure; nla_nest_end(skb, nest); @@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; struct sock *sk = NULL; - struct l2tp_stats stats; - unsigned int start; sk = tunnel->sock; @@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if (nest == NULL) goto nla_put_failure; - do { - start = u64_stats_fetch_begin(&session->stats.syncp); - stats.tx_packets = session->stats.tx_packets; - stats.tx_bytes = session->stats.tx_bytes; - stats.tx_errors = session->stats.tx_errors; - stats.rx_packets = session->stats.rx_packets; - stats.rx_bytes = session->stats.rx_bytes; - stats.rx_errors = session->stats.rx_errors; - stats.rx_seq_discards = session->stats.rx_seq_discards; - stats.rx_oos_packets = session->stats.rx_oos_packets; - } while (u64_stats_fetch_retry(&session->stats.syncp, start)); - - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, + atomic_long_read(&session->stats.tx_packets)) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, + atomic_long_read(&session->stats.tx_bytes)) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, + atomic_long_read(&session->stats.tx_errors)) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, + atomic_long_read(&session->stats.rx_packets)) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, + atomic_long_read(&session->stats.rx_bytes)) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - stats.rx_seq_discards) || + 
atomic_long_read(&session->stats.rx_seq_discards)) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - stats.rx_oos_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) + atomic_long_read(&session->stats.rx_oos_packets)) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, + atomic_long_read(&session->stats.rx_errors))) goto nla_put_failure; nla_nest_end(skb, nest); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6a53371dba1f..637a341c1e2d 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -97,6 +97,7 @@ #include <net/ip.h> #include <net/udp.h> #include <net/xfrm.h> +#include <net/inet_common.h> #include <asm/byteorder.h> #include <linux/atomic.h> @@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int session->name); /* Not bound. Nothing we can do, so discard. */ - session->stats.rx_errors++; + atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } @@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = ps->sock; - struct sk_buff *skb; + struct socket *sock = sk->sk_socket; BUG_ON(session->magic != L2TP_SESSION_MAGIC); - if (session->session_id == 0) - goto out; - - if (sk != NULL) { - lock_sock(sk); - - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { - pppox_unbind_sock(sk); - sk->sk_state = PPPOX_DEAD; - sk->sk_state_change(sk); - } - - /* Purge any queued data */ - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - while ((skb = skb_dequeue(&session->reorder_q))) { - kfree_skb(skb); - sock_put(sk); - } - release_sock(sk); + if (sock) { + inet_shutdown(sock, 2); + /* Don't let the session go away before our socket does */ + l2tp_session_inc_refcount(session); } - -out: return; } @@ -483,19 +466,12 @@ out: */ static void pppol2tp_session_destruct(struct sock *sk) { - struct l2tp_session *session; - - if (sk->sk_user_data != NULL) { - session = sk->sk_user_data; - if (session == NULL) - goto out; - + struct l2tp_session *session = sk->sk_user_data; + if (session) { sk->sk_user_data = NULL; BUG_ON(session->magic != L2TP_SESSION_MAGIC); l2tp_session_dec_refcount(session); } - -out: return; } @@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock) session = pppol2tp_sock_to_session(sk); /* Purge any queued data */ - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); if (session != NULL) { - struct sk_buff *skb; - while ((skb = skb_dequeue(&session->reorder_q))) { - kfree_skb(skb); - sock_put(sk); - } + __l2tp_session_unhash(session); + l2tp_session_queue_purge(session); sock_put(sk); } + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); release_sock(sk); @@ -880,18 +853,6 @@ out: return error; } -/* Called when deleting sessions via the netlink interface. - */ -static int pppol2tp_session_delete(struct l2tp_session *session) -{ - struct pppol2tp_session *ps = l2tp_session_priv(session); - - if (ps->sock == NULL) - l2tp_session_dec_refcount(session); - - return 0; -} - #endif /* CONFIG_L2TP_V3 */ /* getname() support. 
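The read/export side of the same counter conversion follows below: the pppol2tp ioctl copy helper and the proc/debugfs printing switch to atomic_long_read(), which returns long, hence the move from %llu with explicit casts to %ld in the format strings. A minimal stand-in sketch of that export step (field and type names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct user_stats { uint64_t tx_packets, rx_errors; };   /* fixed-width copy for userspace */
struct sess_stats { atomic_long tx_packets, rx_errors; };

/* Each field becomes a single atomic load, implicitly widened to u64. */
static void copy_stats(struct user_stats *dest, struct sess_stats *src)
{
    dest->tx_packets = atomic_load(&src->tx_packets);
    dest->rx_errors  = atomic_load(&src->rx_errors);
}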
@@ -1025,14 +986,14 @@ end: static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, struct l2tp_stats *stats) { - dest->tx_packets = stats->tx_packets; - dest->tx_bytes = stats->tx_bytes; - dest->tx_errors = stats->tx_errors; - dest->rx_packets = stats->rx_packets; - dest->rx_bytes = stats->rx_bytes; - dest->rx_seq_discards = stats->rx_seq_discards; - dest->rx_oos_packets = stats->rx_oos_packets; - dest->rx_errors = stats->rx_errors; + dest->tx_packets = atomic_long_read(&stats->tx_packets); + dest->tx_bytes = atomic_long_read(&stats->tx_bytes); + dest->tx_errors = atomic_long_read(&stats->tx_errors); + dest->rx_packets = atomic_long_read(&stats->rx_packets); + dest->rx_bytes = atomic_long_read(&stats->rx_bytes); + dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); + dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); + dest->rx_errors = atomic_long_read(&stats->rx_errors); } /* Session ioctl helper. @@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) tunnel->name, (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', atomic_read(&tunnel->ref_count) - 1); - seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", + seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n", tunnel->debug, - (unsigned long long)tunnel->stats.tx_packets, - (unsigned long long)tunnel->stats.tx_bytes, - (unsigned long long)tunnel->stats.tx_errors, - (unsigned long long)tunnel->stats.rx_packets, - (unsigned long long)tunnel->stats.rx_bytes, - (unsigned long long)tunnel->stats.rx_errors); + atomic_long_read(&tunnel->stats.tx_packets), + atomic_long_read(&tunnel->stats.tx_bytes), + atomic_long_read(&tunnel->stats.tx_errors), + atomic_long_read(&tunnel->stats.rx_packets), + atomic_long_read(&tunnel->stats.rx_bytes), + atomic_long_read(&tunnel->stats.rx_errors)); } static void pppol2tp_seq_session_show(struct seq_file *m, void *v) @@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) session->lns_mode ? 
"LNS" : "LAC", session->debug, jiffies_to_msecs(session->reorder_timeout)); - seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", + seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n", session->nr, session->ns, - (unsigned long long)session->stats.tx_packets, - (unsigned long long)session->stats.tx_bytes, - (unsigned long long)session->stats.tx_errors, - (unsigned long long)session->stats.rx_packets, - (unsigned long long)session->stats.rx_bytes, - (unsigned long long)session->stats.rx_errors); + atomic_long_read(&session->stats.tx_packets), + atomic_long_read(&session->stats.tx_bytes), + atomic_long_read(&session->stats.tx_errors), + atomic_long_read(&session->stats.rx_packets), + atomic_long_read(&session->stats.rx_bytes), + atomic_long_read(&session->stats.rx_errors)); if (po) seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); @@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = { static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { .session_create = pppol2tp_session_create, - .session_delete = pppol2tp_session_delete, + .session_delete = l2tp_session_delete, }; #endif /* CONFIG_L2TP_V3 */ diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c464..48aaa89253e0 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, int target; /* Read at least this many bytes */ long timeo; + msg->msg_namelen = 0; + lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 808f5fcd1ced..a6893602f87a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, list_del(&dep->list); mutex_unlock(&local->mtx); - ieee80211_roc_notify_destroy(dep); + ieee80211_roc_notify_destroy(dep, true); return 0; } @@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, ieee80211_start_next_roc(local); mutex_unlock(&local->mtx); - ieee80211_roc_notify_destroy(found); + ieee80211_roc_notify_destroy(found, true); } else { /* work may be pending so use it all the time */ found->abort = true; @@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, /* work will clean up etc */ flush_delayed_work(&found->work); + WARN_ON(!found->to_be_freed); + kfree(found); } return 0; @@ -3290,14 +3292,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, int ret = -ENODATA; rcu_read_lock(); - if (local->use_chanctx) { - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (chanctx_conf) { - *chandef = chanctx_conf->def; - ret = 0; - } - } else if (local->open_count == local->monitors) { - *chandef = local->monitor_chandef; + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (chanctx_conf) { + *chandef = chanctx_conf->def; + ret = 0; + } else if (local->open_count > 0 && + local->open_count == local->monitors && + sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (local->use_chanctx) + *chandef = local->monitor_chandef; + else + cfg80211_chandef_create(chandef, + local->_oper_channel, + local->_oper_channel_type); ret = 0; } rcu_read_unlock(); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd641..931be419ab5a 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; + u32 changed; 
int err; lockdep_assert_held(&local->chanctx_mtx); @@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local, ctx->conf.rx_chains_dynamic = 1; ctx->mode = mode; + /* acquire mutex to prevent idle from changing */ + mutex_lock(&local->mtx); + /* turn idle off *before* setting channel -- some drivers need that */ + changed = ieee80211_idle_off(local); + if (changed) + ieee80211_hw_config(local, changed); + if (!local->use_chanctx) { local->_oper_channel_type = cfg80211_get_chandef_type(chandef); @@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local, err = drv_add_chanctx(local, ctx); if (err) { kfree(ctx); - return ERR_PTR(err); + ctx = ERR_PTR(err); + + ieee80211_recalc_idle(local); + goto out; } } + /* and keep the mutex held until the new chanctx is on the list */ list_add_rcu(&ctx->list, &local->chanctx_list); - mutex_lock(&local->mtx); - ieee80211_recalc_idle(local); + out: mutex_unlock(&local->mtx); return ctx; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bada..5672533a0832 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -309,6 +309,7 @@ struct ieee80211_roc_work { struct ieee80211_channel *chan; bool started, abort, hw_begun, notified; + bool to_be_freed; unsigned long hw_start_time; @@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local); void ieee80211_roc_setup(struct ieee80211_local *local); void ieee80211_start_next_roc(struct ieee80211_local *local); void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); -void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); void ieee80211_sw_roc_work(struct work_struct *work); void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); @@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type); void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); void ieee80211_remove_interfaces(struct ieee80211_local *local); +u32 ieee80211_idle_off(struct ieee80211_local *local); void ieee80211_recalc_idle(struct ieee80211_local *local); void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 640afab304d7..9ed49ad0380f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); } -static u32 ieee80211_idle_off(struct ieee80211_local *local) +static u32 __ieee80211_idle_off(struct ieee80211_local *local) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; @@ -87,7 +87,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local) return IEEE80211_CONF_CHANGE_IDLE; } -static u32 ieee80211_idle_on(struct ieee80211_local *local) +static u32 __ieee80211_idle_on(struct ieee80211_local *local) { if (local->hw.conf.flags & IEEE80211_CONF_IDLE) return 0; @@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local) return IEEE80211_CONF_CHANGE_IDLE; } -void ieee80211_recalc_idle(struct ieee80211_local *local) +static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, + bool force_active) { bool working = false, scanning, active; unsigned int led_trig_start = 0, led_trig_stop = 0; struct ieee80211_roc_work *roc; - u32 change; lockdep_assert_held(&local->mtx); - active = 
!list_empty(&local->chanctx_list) || local->monitors; + active = force_active || + !list_empty(&local->chanctx_list) || + local->monitors; if (!local->ops->remain_on_channel) { list_for_each_entry(roc, &local->roc_list, list) { @@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local) ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (working || scanning || active) - change = ieee80211_idle_off(local); - else - change = ieee80211_idle_on(local); + return __ieee80211_idle_off(local); + return __ieee80211_idle_on(local); +} + +u32 ieee80211_idle_off(struct ieee80211_local *local) +{ + return __ieee80211_recalc_idle(local, true); +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ + u32 change = __ieee80211_recalc_idle(local, false); if (change) ieee80211_hw_config(local, change); } @@ -349,21 +360,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; - int ret = 0; + int ret; if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) return 0; - mutex_lock(&local->iflist_mtx); + ASSERT_RTNL(); if (local->monitor_sdata) - goto out_unlock; + return 0; sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); - if (!sdata) { - ret = -ENOMEM; - goto out_unlock; - } + if (!sdata) + return -ENOMEM; /* set up data */ sdata->local = local; @@ -377,13 +386,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) if (WARN_ON(ret)) { /* ok .. stupid driver, it asked for this! */ kfree(sdata); - goto out_unlock; + return ret; } ret = ieee80211_check_queues(sdata); if (ret) { kfree(sdata); - goto out_unlock; + return ret; } ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, @@ -391,13 +400,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) if (ret) { drv_remove_interface(local, sdata); kfree(sdata); - goto out_unlock; + return ret; } + mutex_lock(&local->iflist_mtx); rcu_assign_pointer(local->monitor_sdata, sdata); - out_unlock: mutex_unlock(&local->iflist_mtx); - return ret; + + return 0; } static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) @@ -407,14 +417,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) return; + ASSERT_RTNL(); + mutex_lock(&local->iflist_mtx); sdata = rcu_dereference_protected(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx)); - if (!sdata) - goto out_unlock; + if (!sdata) { + mutex_unlock(&local->iflist_mtx); + return; + } rcu_assign_pointer(local->monitor_sdata, NULL); + mutex_unlock(&local->iflist_mtx); + synchronize_net(); ieee80211_vif_release_channel(sdata); @@ -422,8 +438,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) drv_remove_interface(local, sdata); kfree(sdata); - out_unlock: - mutex_unlock(&local->iflist_mtx); } /* @@ -541,6 +555,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); netif_carrier_on(dev); break; @@ -812,6 +829,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, ieee80211_adjust_monitor_flags(sdata, -1); ieee80211_configure_filter(local); + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); break; case 
NL80211_IFTYPE_P2P_DEVICE: /* relies on synchronize_rcu() below */ diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7b..4749b3858695 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) - if (ieee80211_vif_is_mesh(&sdata->vif)) + if (ieee80211_vif_is_mesh(&sdata->vif) && + ieee80211_sdata_running(sdata)) ieee80211_queue_work(&local->hw, &sdata->work); rcu_read_unlock(); } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05f..346ad4cfb013 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & mask) >> shift; + if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED) + continue; + switch (ap_mcs) { default: if (our_mcs <= ap_mcs) @@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* + * Stop timers before deleting work items, as timers + * could race and re-add the work-items. They will be + * re-established on connection. + */ + del_timer_sync(&ifmgd->conn_mon_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); + + /* * we need to use atomic bitops for the running bits * only because both timers might fire at the same * time -- the code here is properly synchronised. @@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) if (del_timer_sync(&ifmgd->timer)) set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); - cancel_work_sync(&ifmgd->chswitch_work); if (del_timer_sync(&ifmgd->chswitch_timer)) set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - - /* these will just be re-established on connection */ - del_timer_sync(&ifmgd->conn_mon_timer); - del_timer_sync(&ifmgd->bcn_mon_timer); + cancel_work_sync(&ifmgd->chswitch_work); } void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) @@ -3601,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) /* Restart STA timers */ rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - ieee80211_restart_sta_timer(sdata); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + ieee80211_restart_sta_timer(sdata); + } rcu_read_unlock(); } @@ -3955,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data; - if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4016,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + } if (ifmgd->auth_data && !ifmgd->auth_data->done) { err = -EBUSY; 
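The quiesce/stop changes around here are about teardown ordering: a timer whose handler queues a work item must be stopped before that work is cancelled, otherwise the timer can fire after cancel_work_sync() and re-arm the work. That is the reasoning spelled out in the comment added to ieee80211_sta_quiesce() above, and the hunk that follows adds the matching cancel_work_sync() calls to ieee80211_mgd_stop(). A stub-level sketch of the ordering, with stand-in names:

struct conn_monitor {
    int timer_armed;
    int work_queued;
};

static void timer_stop_sync(struct conn_monitor *m)  { m->timer_armed = 0; }
static void work_cancel_sync(struct conn_monitor *m) { m->work_queued = 0; }

static void conn_monitor_quiesce(struct conn_monitor *m)
{
    timer_stop_sync(m);      /* 1. stop the source that re-queues work */
    work_cancel_sync(m);     /* 2. then cancel/flush the work itself */
}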
@@ -4315,6 +4340,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* + * Make sure some work items will not run after this, + * they will not do anything but might not have been + * cancelled when disconnecting. + */ + cancel_work_sync(&ifmgd->monitor_work); + cancel_work_sync(&ifmgd->beacon_connection_loss_work); + cancel_work_sync(&ifmgd->request_smps_work); + cancel_work_sync(&ifmgd->csa_connection_drop_work); + cancel_work_sync(&ifmgd->chswitch_work); + mutex_lock(&ifmgd->mtx); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e821..430bd254e496 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) } } -void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) { struct ieee80211_roc_work *dep, *tmp; + if (WARN_ON(roc->to_be_freed)) + return; + /* was never transmitted */ if (roc->frame) { cfg80211_mgmt_tx_status(&roc->sdata->wdev, @@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) GFP_KERNEL); list_for_each_entry_safe(dep, tmp, &roc->dependents, list) - ieee80211_roc_notify_destroy(dep); + ieee80211_roc_notify_destroy(dep, true); - kfree(roc); + if (free) + kfree(roc); + else + roc->to_be_freed = true; } void ieee80211_sw_roc_work(struct work_struct *work) @@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work) mutex_lock(&local->mtx); + if (roc->to_be_freed) + goto out_unlock; + if (roc->abort) goto finish; @@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) finish: list_del(&roc->list); started = roc->started; - ieee80211_roc_notify_destroy(roc); + ieee80211_roc_notify_destroy(roc, !roc->abort); if (started) { drv_flush(local, false); @@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work) list_del(&roc->list); - ieee80211_roc_notify_destroy(roc); + ieee80211_roc_notify_destroy(roc, true); /* if there's another roc, start it now */ ieee80211_start_next_roc(local); @@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) list_for_each_entry_safe(roc, tmp, &tmp_list, list) { if (local->ops->remain_on_channel) { list_del(&roc->list); - ieee80211_roc_notify_destroy(roc); + ieee80211_roc_notify_destroy(roc, true); } else { ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); /* work will clean up etc */ flush_delayed_work(&roc->work); + WARN_ON(!roc->to_be_freed); + kfree(roc); } } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b9..c6844ad080be 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) memset(nskb->cb, 0, sizeof(nskb->cb)); - ieee80211_tx_skb(rx->sdata, nskb); + if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); + + info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | + IEEE80211_TX_INTFL_OFFCHAN_TX_OK | + IEEE80211_TX_CTL_NO_CCK_RATE; + if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + info->hw_queue = + local->hw.offchannel_tx_hw_queue; + } + + __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, + status->band); } dev_kfree_skb(rx->skb); return RX_QUEUED; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 
a79ce820cb50..238a0cca320e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; int ret, i; + bool have_key = false; might_sleep(); @@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta) list_del_rcu(&sta->list); mutex_lock(&local->key_mtx); - for (i = 0; i < NUM_DEFAULT_KEYS; i++) + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); - if (sta->ptk) + have_key = true; + } + if (sta->ptk) { __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); + have_key = true; + } mutex_unlock(&local->key_mtx); + if (!have_key) + synchronize_net(); + sta->dead = true; local->num_sta--; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce78d1149f1d..8914d2d2881a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, cpu_to_le16(IEEE80211_FCTL_MOREDATA); } - sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, skb)) break; dev_kfree_skb_any(skb); diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0f92dc24cb89..d7df6ac2c6f1 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set, nla_put_failure: nla_nest_cancel(skb, nested); ipset_nest_end(skb, atd); - return -EMSGSIZE; + if (unlikely(id == first)) { + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; } static int diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index f2627226a087..10a30b4fc7db 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags) dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } +static inline void +hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } +} + static inline int hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) { @@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags) dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } +static inline void +hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } +} + static inline int hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) { diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4b677cf6bf7d..d6a59154d710 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst, static inline void hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) { - dst->nomatch = flags & IPSET_FLAG_NOMATCH; + dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } } static inline int @@ -308,7 +317,16 @@ hash_net6_data_copy(struct 
hash_net6_elem *dst, static inline void hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags) { - dst->nomatch = flags & IPSET_FLAG_NOMATCH; + dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } } static inline int diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 6ba985f1c96f..f2b0a3c30130 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst, static inline void hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) { - dst->nomatch = flags & IPSET_FLAG_NOMATCH; + dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } } static inline int @@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst, static inline void hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) { - dst->nomatch = flags & IPSET_FLAG_NOMATCH; + dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } static inline int @@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem) } static inline void +hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } +} + +static inline void hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) { elem->elem = 0; diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index af20c0c5ced2..349deb672a2d 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags) dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } +static inline void +hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } +} + static inline int hash_netport4_data_match(const struct hash_netport4_elem *elem) { @@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags) dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); } +static inline void +hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags) +{ + if (dst->nomatch) { + *flags = IPSET_FLAG_NOMATCH; + dst->nomatch = 0; + } +} + static inline int hash_netport6_data_match(const struct hash_netport6_elem *elem) { diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 8371c2bac2e4..09c744aa8982 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, { const struct set_elem *e = list_set_elem(map, i); - if (i == map->size - 1 && e->id != IPSET_INVALID_ID) - /* Last element replaced: e.g. 
add new,before,last */ - ip_set_put_byindex(e->id); + if (e->id != IPSET_INVALID_ID) { + const struct set_elem *x = list_set_elem(map, map->size - 1); + + /* Last element replaced or pushed off */ + if (x->id != IPSET_INVALID_ID) + ip_set_put_byindex(x->id); + } if (with_timeout(map->timeout)) list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); else diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a59..61f49d241712 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) skb_reset_network_header(skb); IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); - rcu_read_lock(); ipv4_update_pmtu(skb, dev_net(skb->dev), mtu, 0, 0, 0, 0); - rcu_read_unlock(); /* Client uses PMTUD? */ if (!(cih->frag_off & htons(IP_DF))) goto ignore_ipip; @@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } /* ipvs enabled in this netns ? */ net = skb_net(skb); - if (!net_ipvs(net)->enable) + ipvs = net_ipvs(net); + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) return NF_ACCEPT; ip_vs_fill_iph_skb(af, skb, &iph); @@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); - ipvs = net_ipvs(net); /* Check the server status */ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ @@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, { int r; struct net *net; + struct netns_ipvs *ipvs; if (ip_hdr(skb)->protocol != IPPROTO_ICMP) return NF_ACCEPT; /* ipvs enabled in this netns ? */ net = skb_net(skb); - if (!net_ipvs(net)->enable) + ipvs = net_ipvs(net); + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) return NF_ACCEPT; return ip_vs_in_icmp(skb, &r, hooknum); @@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, { int r; struct net *net; + struct netns_ipvs *ipvs; struct ip_vs_iphdr iphdr; ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); @@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, /* ipvs enabled in this netns ? 
*/ net = skb_net(skb); - if (!net_ipvs(net)->enable) + ipvs = net_ipvs(net); + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) return NF_ACCEPT; return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c68198bf9128..9e2d1cccd1eb 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "backup_only", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", @@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; ipvs->sysctl_pmtu_disc = 1; tbl[idx++].data = &ipvs->sysctl_pmtu_disc; + tbl[idx++].data = &ipvs->sysctl_backup_only; ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index ae8ec6f27688..cd1d7298f7ba 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, sctp_chunkhdr_t _sctpch, *sch; unsigned char chunk_type; int event, next_state; - int ihl; + int ihl, cofs; #ifdef CONFIG_IP_VS_IPV6 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); @@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, ihl = ip_hdrlen(skb); #endif - sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), - sizeof(_sctpch), &_sctpch); + cofs = ihl + sizeof(sctp_sctphdr_t); + sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); if (sch == NULL) return; @@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, */ if ((sch->type == SCTP_CID_COOKIE_ECHO) || (sch->type == SCTP_CID_COOKIE_ACK)) { - sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + - sch->length), sizeof(_sctpch), &_sctpch); - if (sch) { - if (sch->type == SCTP_CID_ABORT) + int clen = ntohs(sch->length); + + if (clen >= sizeof(sctp_chunkhdr_t)) { + sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), + sizeof(_sctpch), &_sctpch); + if (sch && sch->type == SCTP_CID_ABORT) chunk_type = sch->type; } } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a9740bd6fe54..94b4b9853f60 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, { const struct nf_conn_help *help; const struct nf_conntrack_helper *helper; + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; /* Called from the helper function, this call never fails */ help = nfct_help(ct); @@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, helper = rcu_dereference(help->helper); nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, - "nf_ct_%s: dropping packet: %s ", helper->name, fmt); + "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); + + va_end(args); } EXPORT_SYMBOL_GPL(nf_ct_helper_log); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 432f95780003..ba65b2041eb4 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -969,6 
+969,10 @@ static int __init nf_conntrack_proto_dccp_init(void) { int ret; + ret = register_pernet_subsys(&dccp_net_ops); + if (ret < 0) + goto out_pernet; + ret = nf_ct_l4proto_register(&dccp_proto4); if (ret < 0) goto out_dccp4; @@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void) if (ret < 0) goto out_dccp6; - ret = register_pernet_subsys(&dccp_net_ops); - if (ret < 0) - goto out_pernet; - return 0; -out_pernet: - nf_ct_l4proto_unregister(&dccp_proto6); out_dccp6: nf_ct_l4proto_unregister(&dccp_proto4); out_dccp4: + unregister_pernet_subsys(&dccp_net_ops); +out_pernet: return ret; } diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bd7d01d9c7e7..155ce9f8a0db 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void) { int ret; - ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); - if (ret < 0) - goto out_gre4; - ret = register_pernet_subsys(&proto_gre_net_ops); if (ret < 0) goto out_pernet; + ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); + if (ret < 0) + goto out_gre4; + return 0; -out_pernet: - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); out_gre4: + unregister_pernet_subsys(&proto_gre_net_ops); +out_pernet: return ret; } diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 480f616d5936..ec83536def9a 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void) { int ret; + ret = register_pernet_subsys(&sctp_net_ops); + if (ret < 0) + goto out_pernet; + ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); if (ret < 0) goto out_sctp4; @@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void) if (ret < 0) goto out_sctp6; - ret = register_pernet_subsys(&sctp_net_ops); - if (ret < 0) - goto out_pernet; - return 0; -out_pernet: - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6); out_sctp6: nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); out_sctp4: + unregister_pernet_subsys(&sctp_net_ops); +out_pernet: return ret; } diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 157489581c31..ca969f6273f7 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void) { int ret; + ret = register_pernet_subsys(&udplite_net_ops); + if (ret < 0) + goto out_pernet; + ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); if (ret < 0) goto out_udplite4; @@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void) if (ret < 0) goto out_udplite6; - ret = register_pernet_subsys(&udplite_net_ops); - if (ret < 0) - goto out_pernet; - return 0; -out_pernet: - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); out_udplite6: nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); out_udplite4: + unregister_pernet_subsys(&udplite_net_ops); +out_pernet: return ret; } diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0e7d423324c3..e0c4373b4747 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, end += strlen("\r\n\r\n") + clen; msglen = origlen = end - dptr; - if 
(msglen > datalen) { - nf_ct_helper_log(skb, ct, "incomplete/bad SIP message"); - return NF_DROP; - } + if (msglen > datalen) + return NF_ACCEPT; ret = process_sip_msg(skb, ct, protoff, dataoff, &dptr, &msglen); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1c..fedee3943661 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void) register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); if (!nf_ct_netfilter_header) { pr_err("nf_conntrack: can't register to sysctl.\n"); + ret = -ENOMEM; goto out_sysctl; } #endif diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 8d5769c6d16e..ad24be070e53 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet); struct nf_nat_proto_clean { u8 l3proto; u8 l4proto; - bool hash; }; -/* Clear NAT section of all conntracks, in case we're loaded again. */ -static int nf_nat_proto_clean(struct nf_conn *i, void *data) +/* kill conntracks with affected NAT section */ +static int nf_nat_proto_remove(struct nf_conn *i, void *data) { const struct nf_nat_proto_clean *clean = data; struct nf_conn_nat *nat = nfct_nat(i); if (!nat) return 0; - if (!(i->status & IPS_SRC_NAT_DONE)) - return 0; + if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) return 0; - if (clean->hash) { - spin_lock_bh(&nf_nat_lock); - hlist_del_rcu(&nat->bysource); - spin_unlock_bh(&nf_nat_lock); - } else { - memset(nat, 0, sizeof(*nat)); - i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | - IPS_SEQ_ADJUST); - } - return 0; + return i->status & IPS_NAT_MASK ? 
1 : 0; } static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) @@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) struct net *net; rtnl_lock(); - /* Step 1 - remove from bysource hash */ - clean.hash = true; for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); - synchronize_rcu(); - - /* Step 2 - clean NAT section */ - clean.hash = false; - for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); rtnl_unlock(); } @@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto) struct net *net; rtnl_lock(); - /* Step 1 - remove from bysource hash */ - clean.hash = true; - for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); - synchronize_rcu(); - /* Step 2 - clean NAT section */ - clean.hash = false; for_each_net(net) - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); rtnl_unlock(); } @@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) { struct nf_nat_proto_clean clean = {}; - nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean); + nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); synchronize_rcu(); nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec251712..0b1b32cda307 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id) } EXPORT_SYMBOL_GPL(nfnl_unlock); -static struct mutex *nfnl_get_lock(__u8 subsys_id) -{ - return &table[subsys_id].mutex; -} - int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) { nfnl_lock(n->subsys_id); @@ -199,7 +194,7 @@ replay: rcu_read_unlock(); nfnl_lock(subsys_id); if (rcu_dereference_protected(table[subsys_id].subsys, - lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || + lockdep_is_held(&table[subsys_id].mutex)) != ss || nfnetlink_find_client(type, ss) != nc) err = -EAGAIN; else if (nc->call) diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4c..dc3fd5d44464 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, return -EINVAL; acct_name = nla_data(tb[NFACCT_NAME]); + if (strlen(acct_name) == 0) + return -EINVAL; list_for_each_entry(nfacct, &nfnl_acct_list, head) { if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 858fd52c1040..42680b2baa11 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid) inst->queue_num = queue_num; inst->peer_portid = portid; inst->queue_maxlen = NFQNL_QMAX_DEFAULT; - inst->copy_range = 0xfffff; + inst->copy_range = 0xffff; inst->copy_mode = NFQNL_COPY_NONE; spin_lock_init(&inst->lock); INIT_LIST_HEAD(&inst->queue_list); @@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void) #ifdef CONFIG_PROC_FS if (!proc_create("nfnetlink_queue", 0440, - proc_net_netfilter, &nfqnl_file_ops)) + proc_net_netfilter, &nfqnl_file_ops)) { + status = -ENOMEM; goto cleanup_subsys; + } #endif register_netdevice_notifier(&nfqnl_dev_notifier); diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f3..3228d7f24eb4 100644 --- 
a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c @@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_audit_info *info = par->targinfo; struct audit_buffer *ab; + if (audit_enabled == 0) + goto errout; + ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); if (ab == NULL) goto errout; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4de..8a6c6ea466d8 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, struct netlbl_unlhsh_walk_arg cb_arg; u32 skip_bkt = cb->args[0]; u32 skip_chain = cb->args[1]; - u32 skip_addr4 = cb->args[2]; - u32 skip_addr6 = cb->args[3]; u32 iter_bkt; u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; struct netlbl_unlhsh_iface *iface; @@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, continue; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[2]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[3]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, unlabel_staticlist_return: rcu_read_unlock(); - cb->args[0] = skip_bkt; - cb->args[1] = skip_chain; - cb->args[2] = skip_addr4; - cb->args[3] = skip_addr6; + cb->args[0] = iter_bkt; + cb->args[1] = iter_chain; + cb->args[2] = iter_addr4; + cb->args[3] = iter_addr6; return skb->len; } @@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, { struct netlbl_unlhsh_walk_arg cb_arg; struct netlbl_unlhsh_iface *iface; - u32 skip_addr4 = cb->args[0]; - u32 skip_addr6 = cb->args[1]; - u32 iter_addr4 = 0; + u32 iter_addr4 = 0, iter_addr6 = 0; struct netlbl_af4list *addr4; #if IS_ENABLED(CONFIG_IPV6) - u32 iter_addr6 = 0; struct netlbl_af6list *addr6; #endif @@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, goto unlabel_staticlistdef_return; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[0]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, @@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, } #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[1]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, @@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, unlabel_staticlistdef_return: rcu_read_unlock(); - cb->args[0] = skip_addr4; - cb->args[1] = skip_addr6; + cb->args[0] = iter_addr4; + cb->args[1] = iter_addr6; return skb->len; } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f2aabb6f4105..5a55be3f17a5 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family, int err = 0; BUG_ON(grp->name[0] == '\0'); + BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); genl_lock(); diff --git a/net/netrom/af_netrom.c 
b/net/netrom/af_netrom.c index d1fa1d9ffd2e..103bd704b5fc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, } if (sax != NULL) { + memset(sax, 0, sizeof(*sax)); sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 7f8266dd14cb..ee25f25f0cd6 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) } } -static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) +static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, + int err) { struct sock *sk; struct hlist_node *tmp; @@ -100,11 +101,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) nfc_llcp_accept_unlink(accept_sk); + if (err) + accept_sk->sk_err = err; accept_sk->sk_state = LLCP_CLOSED; + accept_sk->sk_state_change(sk); bh_unlock_sock(accept_sk); - - sock_orphan(accept_sk); } if (listen == true) { @@ -123,16 +125,45 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) continue; } + if (err) + sk->sk_err = err; sk->sk_state = LLCP_CLOSED; + sk->sk_state_change(sk); bh_unlock_sock(sk); - sock_orphan(sk); - sk_del_node_init(sk); } write_unlock(&local->sockets.lock); + + /* + * If we want to keep the listening sockets alive, + * we don't touch the RAW ones. + */ + if (listen == true) + return; + + write_lock(&local->raw_sockets.lock); + + sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { + llcp_sock = nfc_llcp_sock(sk); + + bh_lock_sock(sk); + + nfc_llcp_socket_purge(llcp_sock); + + if (err) + sk->sk_err = err; + sk->sk_state = LLCP_CLOSED; + sk->sk_state_change(sk); + + bh_unlock_sock(sk); + + sk_del_node_init(sk); + } + + write_unlock(&local->raw_sockets.lock); } struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) @@ -142,20 +173,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) return local; } -static void local_release(struct kref *ref) +static void local_cleanup(struct nfc_llcp_local *local, bool listen) { - struct nfc_llcp_local *local; - - local = container_of(ref, struct nfc_llcp_local, ref); - - list_del(&local->list); - nfc_llcp_socket_release(local, false); + nfc_llcp_socket_release(local, listen, ENXIO); del_timer_sync(&local->link_timer); skb_queue_purge(&local->tx_queue); cancel_work_sync(&local->tx_work); cancel_work_sync(&local->rx_work); cancel_work_sync(&local->timeout_work); kfree_skb(local->rx_pending); +} + +static void local_release(struct kref *ref) +{ + struct nfc_llcp_local *local; + + local = container_of(ref, struct nfc_llcp_local, ref); + + list_del(&local->list); + local_cleanup(local, false); kfree(local); } @@ -785,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, skb_get(skb); } else { pr_err("Receive queue is full\n"); - kfree_skb(skb); } nfc_llcp_sock_put(llcp_sock); @@ -986,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, skb_get(skb); } else { pr_err("Receive queue is full\n"); - kfree_skb(skb); } } @@ -1348,7 +1382,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) return; /* Close and purge all existing sockets */ - nfc_llcp_socket_release(local, true); + nfc_llcp_socket_release(local, true, 0); } void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, @@ -1427,6 +1461,8 @@ void 
nfc_llcp_unregister_device(struct nfc_dev *dev) return; } + local_cleanup(local, false); + nfc_llcp_local_put(local); } diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5332751943a9..6c94447ec414 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c @@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, } if (sk->sk_state == LLCP_CONNECTED || !newsock) { - nfc_llcp_accept_unlink(sk); + list_del_init(&lsk->accept_queue); + sock_put(sk); + if (newsock) sock_graft(sk, newsock); @@ -278,6 +280,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, pr_debug("Returning sk state %d\n", sk->sk_state); + sk_acceptq_removed(parent); + return sk; } @@ -462,8 +466,6 @@ static int llcp_sock_release(struct socket *sock) nfc_llcp_accept_unlink(accept_sk); release_sock(accept_sk); - - sock_orphan(accept_sk); } } @@ -644,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, pr_debug("%p %zu\n", sk, len); + msg->msg_namelen = 0; + lock_sock(sk); if (sk->sk_state == LLCP_CLOSED && @@ -689,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); + memset(sockaddr, 0, sizeof(*sockaddr)); sockaddr->sa_family = AF_NFC; sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; sockaddr->dsap = ui_cb->dsap; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index ac2defeeba83..d4d5363c7ba7 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); *current_tci = vhdr->h_vlan_TCI; @@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_add(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); } __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e87a26506dba..6980c3e6f066 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, skb_copy_and_csum_dev(skb, nla_data(nla)); + genlmsg_end(user_skb, upcall); err = genlmsg_unicast(net, user_skb, upcall_info->portid); out: @@ -1592,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, return ERR_PTR(-ENOMEM); retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); - if (retval < 0) { - kfree_skb(skb); - return ERR_PTR(retval); - } + BUG_ON(retval < 0); + return skb; } @@ -1690,6 +1689,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(vport)) goto exit_unlock; + err = 0; reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { @@ -1724,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) err = -EINVAL; + reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!reply) { + err = -ENOMEM; + goto exit_unlock; + } + if (!err && a[OVS_VPORT_ATTR_OPTIONS]) err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); if (err) - goto 
exit_unlock; + goto exit_free; + if (a[OVS_VPORT_ATTR_UPCALL_PID]) vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); - reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, - OVS_VPORT_CMD_NEW); - if (IS_ERR(reply)) { - netlink_set_err(sock_net(skb->sk)->genl_sock, 0, - ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); - goto exit_unlock; - } + err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, + info->snd_seq, 0, OVS_VPORT_CMD_NEW); + BUG_ON(err < 0); genl_notify(reply, genl_info_net(info), info->snd_portid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); + rtnl_unlock(); + return 0; + +exit_free: + kfree_skb(reply); exit_unlock: rtnl_unlock(); return err; @@ -1771,6 +1779,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(reply)) goto exit_unlock; + err = 0; ovs_dp_detach_port(vport); genl_notify(reply, genl_info_net(info), info->snd_portid, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 20605ecf100b..67a2b783fe70 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) return htons(ETH_P_802_2); __skb_pull(skb, sizeof(struct llc_snap_hdr)); - return llc->ethertype; + + if (ntohs(llc->ethertype) >= 1536) + return llc->ethertype; + + return htons(ETH_P_802_2); } static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, @@ -791,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) { + BUG_ON(table->count == 0); hlist_del_rcu(&flow->hash_node[table->node_ver]); table->count--; - BUG_ON(table->count < 0); } /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 670cbc3518de..2130d61c384a 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) /* Make our own copy of the packet. Otherwise we will mangle the * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). - * (No one comes after us, since we tell handle_bridge() that we took - * the packet.) */ + */ skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ba717cc038b3..f6b8132ce4cb 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) * @skb: skb that was received * * Must be called with rcu_read_lock. The packet cannot be shared and - * skb->data should point to the Ethernet header. The caller must have already - * called compute_ip_summed() to initialize the checksumming fields. + * skb->data should point to the Ethernet header. 
*/ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) { diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c @@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, for (i = 0; i < nr; i++) { BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); + ctr.name[sizeof(ctr.name) - 1] = '\0'; ctr.value = values[i]; rds_info_copy(iter, &ctr, sizeof(ctr)); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054a..9c8347451597 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { + memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 1135d8227f9b..9b97172db84a 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, if (err < 0) return err; - err = -EINVAL; if (tb[TCA_FW_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); tcf_bind_filter(tp, &f->res, base); @@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, } #endif /* CONFIG_NET_CLS_IND */ + err = -EINVAL; if (tb[TCA_FW_MASK]) { mask = nla_get_u32(tb[TCA_FW_MASK]); if (mask != head->mask) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ffb..1bc210ffcba2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) cbq_update(q); if ((incr -= incr2) < 0) incr = 0; + q->now += incr; + } else { + if (now > q->now) + q->now = now; } - q->now += incr; q->now_rt = now; for (;;) { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb2534..55786283a3df 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow->deficit = q->quantum; flow->dropped = 0; } - if (++sch->q.qlen < sch->limit) + if (++sch->q.qlen <= sch->limit) return NET_XMIT_SUCCESS; q->drop_overlimit++; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a22..eac7e0ee23c1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) u64 mult; int shift; - r->rate_bps = rate << 3; + r->rate_bps = (u64)rate << 3; r->shift = 0; r->mult = 1; /* diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, new_num_classes == q->max_agg_classes - 1) /* agg no more full */ hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); + /* The next assignment may let + * agg->initial_budget > agg->budgetmax + * hold, we will take it into account in charge_actual_service(). 
+ */ agg->budgetmax = new_num_classes * agg->lmax; new_agg_weight = agg->class_weight * new_num_classes; agg->inv_w = ONE_FP/new_agg_weight; @@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) unsigned long old_vslot = q->oldV >> q->min_slot_shift; if (vslot != old_vslot) { - unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; + unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; qfq_move_groups(q, mask, IR, ER); qfq_move_groups(q, mask, IB, EB); } @@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, /* Update F according to the actual service received by the aggregate. */ static inline void charge_actual_service(struct qfq_aggregate *agg) { - /* compute the service received by the aggregate */ - u32 service_received = agg->initial_budget - agg->budget; + /* Compute the service received by the aggregate, taking into + * account that, after decreasing the number of classes in + * agg, it may happen that + * agg->initial_budget - agg->budget > agg->bugdetmax + */ + u32 service_received = min(agg->budgetmax, + agg->initial_budget - agg->budget); agg->F = agg->S + (u64)service_received * agg->inv_w; } +static inline void qfq_update_agg_ts(struct qfq_sched *q, + struct qfq_aggregate *agg, + enum update_reason reason); + +static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); + static struct sk_buff *qfq_dequeue(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); @@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) in_serv_agg->initial_budget = in_serv_agg->budget = in_serv_agg->budgetmax; - if (!list_empty(&in_serv_agg->active)) + if (!list_empty(&in_serv_agg->active)) { /* * Still active: reschedule for * service. Possible optimization: if no other @@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) * handle it, we would need to maintain an * extra num_active_aggs field. */ - qfq_activate_agg(q, in_serv_agg, requeue); - else if (sch->q.qlen == 0) { /* no aggregate to serve */ + qfq_update_agg_ts(q, in_serv_agg, requeue); + qfq_schedule_agg(q, in_serv_agg); + } else if (sch->q.qlen == 0) { /* no aggregate to serve */ q->in_serv_agg = NULL; return NULL; } @@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) qdisc_bstats_update(sch, skb); agg_dequeue(in_serv_agg, cl, len); - in_serv_agg->budget -= len; + /* If lmax is lowered, through qfq_change_class, for a class + * owning pending packets with larger size than the new value + * of lmax, then the following condition may hold. 
+ */ + if (unlikely(in_serv_agg->budget < len)) + in_serv_agg->budget = 0; + else + in_serv_agg->budget -= len; + q->V += (u64)len * IWSUM; pr_debug("qfq dequeue: len %u F %lld now %lld\n", len, (unsigned long long) in_serv_agg->F, @@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = agg->lmax; list_add_tail(&cl->alist, &agg->active); - if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) - return err; /* aggregate was not empty, nothing else to do */ + if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || + q->in_serv_agg == agg) + return err; /* non-empty or in service, nothing else to do */ - /* recharge budget */ - agg->initial_budget = agg->budget = agg->budgetmax; - - qfq_update_agg_ts(q, agg, enqueue); - if (q->in_serv_agg == NULL) - q->in_serv_agg = agg; - else if (agg != q->in_serv_agg) - qfq_schedule_agg(q, agg); + qfq_activate_agg(q, agg, enqueue); return err; } @@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) /* group was surely ineligible, remove */ __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[IB]); - } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) + } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && + q->in_serv_agg == NULL) q->V = roundedS; grp->S = roundedS; @@ -1284,8 +1303,15 @@ skip_update: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, enum update_reason reason) { + agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ + qfq_update_agg_ts(q, agg, reason); - qfq_schedule_agg(q, agg); + if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ + q->in_serv_agg = agg; /* start serving this aggregate */ + /* update V: to be in service, agg must be eligible */ + q->oldV = q->V = agg->S; + } else if (agg != q->in_serv_agg) + qfq_schedule_agg(q, agg); } static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, @@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) __set_bit(grp->index, &q->bitmaps[s]); } } - - qfq_update_eligible(q); } static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 43cd0dd9149d..d2709e2b7be6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, transports) { if (transport == active) - break; + continue; list_for_each_entry(chunk, &transport->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5131fcfedb03..de1a0138317f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, } /* Delete the tempory new association. 
*/ - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); /* Restore association pointer to provide SCTP command interpeter diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index f7d34e7b6f81..5ead60550895 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd, else { int N, i; + /* + * NOTE: we skip uid_valid()/gid_valid() checks here: + * instead, * -1 id's are later mapped to the + * (export-specific) anonymous id by nfsd_setuser. + * + * (But supplementary gid's get no such special + * treatment so are checked for validity here.) + */ /* uid */ rsci.cred.cr_uid = make_kuid(&init_user_ns, id); - if (!uid_valid(rsci.cred.cr_uid)) - goto out; /* gid */ if (get_int(&mesg, &id)) goto out; rsci.cred.cr_gid = make_kgid(&init_user_ns, id); - if (!gid_valid(rsci.cred.cr_gid)) - goto out; /* number of additional gid's */ if (get_int(&mesg, &N)) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf6..d5f35f15af98 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru err = rpciod_up(); if (err) goto out_no_rpciod; - err = -EINVAL; - if (!xprt) - goto out_no_xprt; + err = -EINVAL; if (args->version >= program->nrvers) goto out_err; version = program->version[args->version]; @@ -382,10 +380,9 @@ out_no_principal: out_no_stats: kfree(clnt); out_err: - xprt_put(xprt); -out_no_xprt: rpciod_down(); out_no_rpciod: + xprt_put(xprt); return ERR_PTR(err); } @@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, new = rpc_new_client(args, xprt); if (IS_ERR(new)) { err = PTR_ERR(new); - goto out_put; + goto out_err; } atomic_inc(&clnt->cl_count); @@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, new->cl_chatty = clnt->cl_chatty; return new; -out_put: - xprt_put(xprt); out_err: dprintk("RPC: %s: returned error %d\n", __func__, err); return ERR_PTR(err); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index a0f48a51e14e..a9129f8d7070 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1175,6 +1175,7 @@ static struct file_system_type rpc_pipe_fs_type = { .kill_sb = rpc_kill_sb, }; MODULE_ALIAS_FS("rpc_pipefs"); +MODULE_ALIAS("rpc_pipefs"); static void init_once(void *foo) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fb20f25ddec9..f8529fc8e542 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->tk_waitqueue = queue; queue->qlen++; + /* barrier matches the read in rpc_wake_up_task_queue_locked() */ + smp_wmb(); rpc_set_queued(task); dprintk("RPC: %5u added to queue %p \"%s\"\n", @@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task */ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) { - if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) - __rpc_do_wake_up_task(queue, task); + if (RPC_IS_QUEUED(task)) { + smp_rmb(); + if (task->tk_waitqueue == queue) + __rpc_do_wake_up_task(queue, task); + } } /* diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 
c1d8476b7692..3d02130828da 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt) xs_tcp_shutdown(xprt); } +static void xs_local_destroy(struct rpc_xprt *xprt) +{ + xs_close(xprt); + xs_free_peer_addresses(xprt); + xprt_free(xprt); + module_put(THIS_MODULE); +} + /** * xs_destroy - prepare to shutdown a transport * @xprt: doomed transport @@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt) cancel_delayed_work_sync(&transport->connect_worker); - xs_close(xprt); - xs_free_peer_addresses(xprt); - xprt_free(xprt); - module_put(THIS_MODULE); + xs_local_destroy(xprt); } static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) @@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = { .send_request = xs_local_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_def, .close = xs_close, - .destroy = xs_destroy, + .destroy = xs_local_destroy, .print_stats = xs_local_print_stats, }; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd916..515ce38e4f4c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) if (addr) { addr->family = AF_TIPC; addr->addrtype = TIPC_ADDR_ID; + memset(&addr->addr, 0, sizeof(addr->addr)); addr->addr.id.ref = msg_origport(msg); addr->addr.id.node = msg_orignode(msg); addr->addr.name.domain = 0; /* could leave uninitialized */ @@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, goto exit; } + /* will be updated in set_orig_addr() if needed */ + m->msg_namelen = 0; + timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); restart: @@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, goto exit; } + /* will be updated in set_orig_addr() if needed */ + m->msg_namelen = 0; + target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51be64f163ec..2db702d82e7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk) #endif } -static int unix_release_sock(struct sock *sk, int embrion) +static void unix_release_sock(struct sock *sk, int embrion) { struct unix_sock *u = unix_sk(sk); struct path path; @@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion) if (unix_tot_inflight) unix_gc(); /* Garbage collect fds */ - - return 0; } static void init_peercred(struct sock *sk) @@ -699,9 +697,10 @@ static int unix_release(struct socket *sock) if (!sk) return 0; + unix_release_sock(sk, 0); sock->sk = NULL; - return unix_release_sock(sk, 0); + return 0; } static int unix_autobind(struct socket *sock) @@ -1994,7 +1993,7 @@ again: if ((UNIXCB(skb).pid != siocb->scm->pid) || (UNIXCB(skb).cred != siocb->scm->cred)) break; - } else { + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { /* Copy credentials */ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); check_creds = 1; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388a..7f93e2a42d7a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) struct vsock_sock *vsk; list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) - if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) + if (addr->svm_port == vsk->local_addr.svm_port) 
return sk_vsock(vsk); return NULL; @@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, list_for_each_entry(vsk, vsock_connected_sockets(src, dst), connected_table) { - if (vsock_addr_equals_addr(src, &vsk->remote_addr) - && vsock_addr_equals_addr(dst, &vsk->local_addr)) { + if (vsock_addr_equals_addr(src, &vsk->remote_addr) && + dst->svm_port == vsk->local_addr.svm_port) { return sk_vsock(vsk); } } @@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb, vsk = vsock_sk(sk); err = 0; + msg->msg_namelen = 0; + lock_sock(sk); if (sk->sk_state != SS_CONNECTED) { diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a153..5e04d3d96285 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending( struct vsock_sock *vlistener; struct vsock_sock *vpending; struct sock *pending; + struct sockaddr_vm src; + + vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); vlistener = vsock_sk(listener); list_for_each_entry(vpending, &vlistener->pending_links, pending_links) { - struct sockaddr_vm src; - struct sockaddr_vm dst; - - vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); - vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); - if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && - vsock_addr_equals_addr(&dst, &vpending->local_addr)) { + pkt->dst_port == vpending->local_addr.svm_port) { pending = sk_vsock(vpending); sock_hold(pending); goto found; @@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg) */ bh_lock_sock(sk); - if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) - vmci_trans(vsk)->notify_ops->handle_notify_pkt( - sk, pkt, true, &dst, &src, - &bh_process_pkt); + if (!sock_owned_by_user(sk)) { + /* The local context ID may be out of date, update it. */ + vsk->local_addr.svm_cid = dst.svm_cid; + + if (sk->sk_state == SS_CONNECTED) + vmci_trans(vsk)->notify_ops->handle_notify_pkt( + sk, pkt, true, &dst, &src, + &bh_process_pkt); + } bh_unlock_sock(sk); @@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work) lock_sock(sk); + /* The local context ID may be out of date. */ + vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; + switch (sk->sk_state) { case SS_LISTEN: vmci_transport_recv_listen(sk, pkt); @@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk, pending = vmci_transport_get_pending(sk, pkt); if (pending) { lock_sock(pending); + + /* The local context ID may be out of date. */ + vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; + switch (pending->sk_state) { case SS_CONNECTING: err = vmci_transport_recv_connecting_server(sk, @@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; + msg->msg_namelen = 0; + /* Retrieve the head sk_buff from the socket's receive queue. 
*/ err = 0; skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); @@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, if (err) goto out; - msg->msg_namelen = 0; if (msg->msg_name) { struct sockaddr_vm *vm_addr; diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c59..ec2611b4ea0e 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c @@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, } EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, - const struct sockaddr_vm *other) -{ - return (addr->svm_cid == VMADDR_CID_ANY || - other->svm_cid == VMADDR_CID_ANY || - addr->svm_cid == other->svm_cid) && - addr->svm_port == other->svm_port; -} -EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); - int vsock_addr_cast(const struct sockaddr *addr, size_t len, struct sockaddr_vm **out_addr) { diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf843..9ccd5316eac0 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h @@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr); void vsock_addr_unbind(struct sockaddr_vm *addr); bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, const struct sockaddr_vm *other); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, - const struct sockaddr_vm *other); int vsock_addr_cast(const struct sockaddr *addr, size_t len, struct sockaddr_vm **out_addr); diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b017..6ddf74f0ae1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) rdev_rfkill_poll(rdev); } +void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + lockdep_assert_held(&rdev->devlist_mtx); + lockdep_assert_held(&rdev->sched_scan_mtx); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) + return; + + if (!wdev->p2p_started) + return; + + rdev_stop_p2p_device(rdev, wdev); + wdev->p2p_started = false; + + rdev->opencount--; + + if (rdev->scan_req && rdev->scan_req->wdev == wdev) { + bool busy = work_busy(&rdev->scan_done_wk); + + /* + * If the work isn't pending or running (in which case it would + * be waiting for the lock we hold) the driver didn't properly + * cancel the scan when the interface was removed. In this case + * warn and leak the scan request object to not crash later. 
+ */ + WARN_ON(!busy); + + rdev->scan_req->aborted = true; + ___cfg80211_scan_done(rdev, !busy); + } +} + static int cfg80211_rfkill_set_block(void *data, bool blocked) { struct cfg80211_registered_device *rdev = data; @@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) return 0; rtnl_lock(); - mutex_lock(&rdev->devlist_mtx); + + /* read-only iteration need not hold the devlist_mtx */ list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->netdev) { @@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) /* otherwise, check iftype */ switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: - if (!wdev->p2p_started) - break; - rdev_stop_p2p_device(rdev, wdev); - wdev->p2p_started = false; - rdev->opencount--; + /* but this requires it */ + mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); + cfg80211_stop_p2p_device(rdev, wdev); + mutex_unlock(&rdev->sched_scan_mtx); + mutex_unlock(&rdev->devlist_mtx); break; default: break; } } - mutex_unlock(&rdev->devlist_mtx); rtnl_unlock(); return 0; @@ -367,8 +401,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; - rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | - NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; + rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; return &rdev->wiphy; } @@ -746,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work) wdev = container_of(work, struct wireless_dev, cleanup_work); rdev = wiphy_to_dev(wdev->wiphy); - cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->sched_scan_mtx); if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { rdev->scan_req->aborted = true; ___cfg80211_scan_done(rdev, true); } - cfg80211_unlock_rdev(rdev); - - mutex_lock(&rdev->sched_scan_mtx); - if (WARN_ON(rdev->sched_scan_req && rdev->sched_scan_req->dev == wdev->netdev)) { __cfg80211_stop_sched_scan(rdev, false); @@ -782,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) return; mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); list_del_rcu(&wdev->list); rdev->devlist_generation++; switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: - if (!wdev->p2p_started) - break; - rdev_stop_p2p_device(rdev, wdev); - wdev->p2p_started = false; - rdev->opencount--; + cfg80211_stop_p2p_device(rdev, wdev); break; default: WARN_ON_ONCE(1); break; } + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); } EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -937,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, cfg80211_update_iface_num(rdev, wdev->iftype, 1); cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); switch (wdev->iftype) { #ifdef CONFIG_CFG80211_WEXT @@ -968,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; } wdev_unlock(wdev); + mutex_unlock(&rdev->sched_scan_mtx); rdev->opencount++; mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8a..5845c2b37aa8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num); +void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, + struct wireless_dev 
*wdev); + #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e652d05ff712..58e13a8c95f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, if ((chan->flags & IEEE80211_CHAN_RADAR) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) - goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power))) @@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag dev->wiphy.max_acl_mac_addrs)) goto nla_put_failure; - if (dev->wiphy.extended_capabilities && - (nla_put(msg, NL80211_ATTR_EXT_CAPA, - dev->wiphy.extended_capabilities_len, - dev->wiphy.extended_capabilities) || - nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, - dev->wiphy.extended_capabilities_len, - dev->wiphy.extended_capabilities_mask))) - goto nla_put_failure; - return genlmsg_end(msg, hdr); nla_put_failure: @@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { - int idx = 0; + int idx = 0, ret; int start = cb->args[0]; struct cfg80211_registered_device *dev; @@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) continue; if (++idx <= start) continue; - if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - dev) < 0) { + ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + dev); + if (ret < 0) { + /* + * If sending the wiphy data didn't fit (ENOBUFS or + * EMSGSIZE returned), this SKB is still empty (so + * it's not too big because another wiphy dataset is + * already in the skb) and we've not tried to adjust + * the dump allocation yet ... then adjust the alloc + * size to be bigger, and return 1 but with the empty + * skb. This results in an empty message being RX'ed + * in userspace, but that is ignored. + * + * We can then retry with the larger buffer. 
+ */ + if ((ret == -ENOBUFS || ret == -EMSGSIZE) && + !skb->len && + cb->min_dump_alloc < 4096) { + cb->min_dump_alloc = 4096; + mutex_unlock(&cfg80211_mutex); + return 1; + } idx--; break; } @@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; struct cfg80211_registered_device *dev = info->user_ptr[0]; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(4096, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -4703,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->scan) return -EOPNOTSUPP; - if (rdev->scan_req) - return -EBUSY; + mutex_lock(&rdev->sched_scan_mtx); + if (rdev->scan_req) { + err = -EBUSY; + goto unlock; + } if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); - if (!n_channels) - return -EINVAL; + if (!n_channels) { + err = -EINVAL; + goto unlock; + } } else { enum ieee80211_band band; n_channels = 0; @@ -4724,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; - if (n_ssids > wiphy->max_scan_ssids) - return -EINVAL; + if (n_ssids > wiphy->max_scan_ssids) { + err = -EINVAL; + goto unlock; + } if (info->attrs[NL80211_ATTR_IE]) ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); else ie_len = 0; - if (ie_len > wiphy->max_scan_ie_len) - return -EINVAL; + if (ie_len > wiphy->max_scan_ie_len) { + err = -EINVAL; + goto unlock; + } request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); - if (!request) - return -ENOMEM; + if (!request) { + err = -ENOMEM; + goto unlock; + } if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; @@ -4877,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) kfree(request); } + unlock: + mutex_unlock(&rdev->sched_scan_mtx); return err; } @@ -7750,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP; - if (!wdev->p2p_started) - return 0; - - rdev_stop_p2p_device(rdev, wdev); - wdev->p2p_started = false; - - mutex_lock(&rdev->devlist_mtx); - rdev->opencount--; - mutex_unlock(&rdev->devlist_mtx); - - if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { - rdev->scan_req->aborted = true; - ___cfg80211_scan_done(rdev, true); - } + mutex_lock(&rdev->sched_scan_mtx); + cfg80211_stop_p2p_device(rdev, wdev); + mutex_unlock(&rdev->sched_scan_mtx); return 0; } @@ -8487,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, struct nlattr *nest; int i; - ASSERT_RDEV_LOCK(rdev); + lockdep_assert_held(&rdev->sched_scan_mtx); if (WARN_ON(!req)) return 0; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca0079..fd99ea495b7e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) union iwreq_data wrqu; #endif - ASSERT_RDEV_LOCK(rdev); + lockdep_assert_held(&rdev->sched_scan_mtx); request = rdev->scan_req; @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) rdev = container_of(wk, struct cfg80211_registered_device, scan_done_wk); - cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->sched_scan_mtx); ___cfg80211_scan_done(rdev, false); - cfg80211_unlock_rdev(rdev); + 
mutex_unlock(&rdev->sched_scan_mtx); } void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) @@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); if (found) { - found->pub.beacon_interval = tmp->pub.beacon_interval; - found->pub.signal = tmp->pub.signal; - found->pub.capability = tmp->pub.capability; - found->ts = tmp->ts; - /* Update IEs */ if (rcu_access_pointer(tmp->pub.proberesp_ies)) { const struct cfg80211_bss_ies *old; @@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, if (found->pub.hidden_beacon_bss && !list_empty(&found->hidden_list)) { + const struct cfg80211_bss_ies *f; + /* * The found BSS struct is one of the probe * response members of a group, but we're @@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, * SSID to showing it, which is confusing so * drop this information. */ + + f = rcu_access_pointer(tmp->pub.beacon_ies); + kfree_rcu((struct cfg80211_bss_ies *)f, + rcu_head); goto drop; } @@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } + + found->pub.beacon_interval = tmp->pub.beacon_interval; + found->pub.signal = tmp->pub.signal; + found->pub.capability = tmp->pub.capability; + found->ts = tmp->ts; } else { struct cfg80211_internal_bss *new; struct cfg80211_internal_bss *hidden; @@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); + mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto out; @@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, dev_hold(dev); } out: + mutex_unlock(&rdev->sched_scan_mtx); kfree(creq); cfg80211_unlock_rdev(rdev); return err; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b1..482c70e70127 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); + lockdep_assert_held(&rdev->sched_scan_mtx); if (rdev->scan_req) return -EBUSY; @@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work) rtnl_lock(); cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { wdev_lock(wdev); @@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work) wdev_unlock(wdev); } + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); rtnl_unlock(); @@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; - mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); wdev_lock(wdev); __cfg80211_sme_scan_done(dev); wdev_unlock(wdev); - mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); } void cfg80211_sme_rx_auth(struct net_device *dev, @@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, int err; mutex_lock(&rdev->devlist_mtx); + /* might request scan - scan_mtx -> wdev_mtx dependency */ + mutex_lock(&rdev->sched_scan_mtx); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); return err; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e19..7586de77a2f8 100644 --- 
a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -27,7 +27,8 @@ #define WIPHY_PR_ARG __entry->wiphy_name #define WDEV_ENTRY __field(u32, id) -#define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) +#define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ + ? wdev->identifier : 0) #define WDEV_PR_FMT "wdev(%u)" #define WDEV_PR_ARG (__entry->id) @@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl, ), TP_fast_assign( WIPHY_ASSIGN; - WIPHY_ASSIGN; + NETDEV_ASSIGN; __entry->acl_policy = params->acl_policy; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99c..e79cb5c0655a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; @@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); err = 0; @@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; @@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); + mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); + mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); return err; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e5..8dafe6d3c6e4 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) x->xflags &= ~XFRM_TIME_DEFER; } +static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) +{ + u32 seq_diff, oseq_diff; + struct km_event c; + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; + struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; + + /* we send notify messages in case + * 1. we updated on of the sequence numbers, and the seqno difference + * is at least x->replay_maxdiff, in this case we also update the + * timeout of our timer function + * 2. if x->replay_maxage has elapsed since last update, + * and there were changes + * + * The state structure must be locked! 
+ */ + + switch (event) { + case XFRM_REPLAY_UPDATE: + if (!x->replay_maxdiff) + break; + + if (replay_esn->seq_hi == preplay_esn->seq_hi) + seq_diff = replay_esn->seq - preplay_esn->seq; + else + seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; + + if (replay_esn->oseq_hi == preplay_esn->oseq_hi) + oseq_diff = replay_esn->oseq - preplay_esn->oseq; + else + oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; + + if (seq_diff < x->replay_maxdiff && + oseq_diff < x->replay_maxdiff) { + + if (x->xflags & XFRM_TIME_DEFER) + event = XFRM_REPLAY_TIMEOUT; + else + return; + } + + break; + + case XFRM_REPLAY_TIMEOUT: + if (memcmp(x->replay_esn, x->preplay_esn, + xfrm_replay_state_esn_len(replay_esn)) == 0) { + x->xflags |= XFRM_TIME_DEFER; + return; + } + + break; + } + + memcpy(x->preplay_esn, x->replay_esn, + xfrm_replay_state_esn_len(replay_esn)); + c.event = XFRM_MSG_NEWAE; + c.data.aevent = event; + km_state_notify(x, &c); + + if (x->replay_maxage && + !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) + x->xflags &= ~XFRM_TIME_DEFER; +} + static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; @@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = { .advance = xfrm_replay_advance_esn, .check = xfrm_replay_check_esn, .recheck = xfrm_replay_recheck_esn, - .notify = xfrm_replay_notify_bmp, + .notify = xfrm_replay_notify_esn, .overflow = xfrm_replay_overflow_esn, }; diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index 25f216a841d5..477d137c0557 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -14,7 +14,7 @@ kbuild-file := $(srctree)/$(obj)/Kbuild include $(kbuild-file) # called may set destination dir (when installing to asm/) -_dst := $(or $(destination-y),$(dst),$(obj)) +_dst := $(if $(destination-y),$(destination-y),$(if $(dst),$(dst),$(obj))) old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild ifneq ($(wildcard $(old-kbuild-file)),) @@ -48,13 +48,14 @@ all-files := $(header-y) $(genhdr-y) $(wrapper-files) output-files := $(addprefix $(installdir)/, $(all-files)) input-files := $(foreach hdr, $(header-y), \ - $(or \ + $(if $(wildcard $(srcdir)/$(hdr)), \ $(wildcard $(srcdir)/$(hdr)), \ - $(wildcard $(oldsrcdir)/$(hdr)), \ - $(error Missing UAPI file $(srcdir)/$(hdr)) \ + $(if $(wildcard $(oldsrcdir)/$(hdr)), \ + $(wildcard $(oldsrcdir)/$(hdr)), \ + $(error Missing UAPI file $(srcdir)/$(hdr))) \ )) \ $(foreach hdr, $(genhdr-y), \ - $(or \ + $(if $(wildcard $(gendir)/$(hdr)), \ $(wildcard $(gendir)/$(hdr)), \ $(error Missing generated UAPI file $(gendir)/$(hdr)) \ )) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b28cc384a5bc..4de4bc48493b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3016,6 +3016,7 @@ sub process { $dstat !~ /^'X'$/ && # character constants $dstat !~ /$exceptions/ && $dstat !~ /^\.$Ident\s*=/ && # .foo = + $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ && # stringification #foo $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...) $dstat !~ /^for\s*$Constant$/ && # for (...) $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) 
bar() diff --git a/security/capability.c b/security/capability.c index 579775088967..6783c3e6c88e 100644 --- a/security/capability.c +++ b/security/capability.c @@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security) { return 0; } + +static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, tun_dev_open); set_to_cap_if_null(ops, tun_dev_attach_queue); set_to_cap_if_null(ops, tun_dev_attach); + set_to_cap_if_null(ops, skb_owned_by); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM set_to_cap_if_null(ops, xfrm_policy_alloc_security); diff --git a/security/keys/compat.c b/security/keys/compat.c index 1c261763f479..d65fa7fa29ba 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -40,12 +40,12 @@ static long compat_keyctl_instantiate_key_iov( ARRAY_SIZE(iovstack), iovstack, &iov); if (ret < 0) - return ret; + goto err; if (ret == 0) goto no_payload_free; ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); - +err: if (iov != iovstack) kfree(iov); return ret; diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index a571fad91010..42defae1e161 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -57,7 +57,7 @@ int install_user_keyrings(void) kenter("%p{%u}", user, uid); - if (user->uid_keyring) { + if (user->uid_keyring && user->session_keyring) { kleave(" = 0 [exist]"); return 0; } diff --git a/security/security.c b/security/security.c index 7b88c6aeaed4..03f248b84e9f 100644 --- a/security/security.c +++ b/security/security.c @@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security) } EXPORT_SYMBOL(security_tun_dev_open); +void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ + security_ops->skb_owned_by(skb, sk); +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2fa28c88900c..7171a957b933 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -51,6 +51,7 @@ #include <linux/tty.h> #include <net/icmp.h> #include <net/ip.h> /* for local_port_range[] */ +#include <net/sock.h> #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ #include <net/net_namespace.h> #include <net/netlabel.h> @@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); } +static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ + skb_set_owner_w(skb, sk); +} + static int selinux_secmark_relabel_packet(u32 sid) { const struct task_security_struct *__tsec; @@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = { .tun_dev_attach_queue = selinux_tun_dev_attach_queue, .tun_dev_attach = selinux_tun_dev_attach, .tun_dev_open = selinux_tun_dev_open, + .skb_owned_by = selinux_skb_owned_by, #ifdef CONFIG_SECURITY_NETWORK_XFRM .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 48665ecd1197..8ab295154517 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, if (old_ctx) { new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, - GFP_KERNEL); + GFP_ATOMIC); if (!new_ctx) return -ENOMEM; diff 
--git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 23414b93771f..13c88fbcf037 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent) /* Only disallow PTRACE_TRACEME on more aggressive settings. */ switch (ptrace_scope) { case YAMA_SCOPE_CAPABILITY: - rcu_read_lock(); - if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE)) + if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE)) rc = -EPERM; - rcu_read_unlock(); break; case YAMA_SCOPE_NO_ATTACH: rc = -EPERM; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 71ae86ca64ac..eb560fa32321 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { - long size; - unsigned long offset; + struct snd_pcm_runtime *runtime = substream->runtime;; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); - area->vm_flags |= VM_IO; - size = area->vm_end - area->vm_start; - offset = area->vm_pgoff << PAGE_SHIFT; - if (io_remap_pfn_range(area, area->vm_start, - (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, - size, area->vm_page_prot)) - return -EAGAIN; - return 0; + return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes); } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c index 160b1bd0cd62..24d44b2f61ac 100644 --- a/sound/core/seq/seq_timer.c +++ b/sound/core/seq/seq_timer.c @@ -290,10 +290,10 @@ int snd_seq_timer_open(struct snd_seq_queue *q) tid.device = SNDRV_TIMER_GLOBAL_SYSTEM; err = snd_timer_open(&t, str, &tid, q->queue); } - if (err < 0) { - snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err); - return err; - } + } + if (err < 0) { + snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err); + return err; } t->callback = snd_seq_timer_interrupt; t->callback_data = q; diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index 30bcfe470f83..4ff60a6427d9 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -545,6 +545,9 @@ static void seq_chn_common_event(unsigned char *event_rec) case MIDI_PGM_CHANGE: if (seq_mode == SEQ_2) { + if (chn > 15) + break; + synth_devs[dev]->chn_info[chn].pgm_num = p1; if ((int) dev >= num_synths) synth_devs[dev]->set_instr(dev, chn, p1); @@ -596,6 +599,9 @@ static void seq_chn_common_event(unsigned char *event_rec) case MIDI_PITCH_BEND: if (seq_mode == SEQ_2) { + if (chn > 15) + break; + synth_devs[dev]->chn_info[chn].bender_value = w14; if ((int) dev < num_synths) diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c index 3536b076b529..0aabfedeecba 100644 --- a/sound/pci/asihpi/asihpi.c +++ b/sound/pci/asihpi/asihpi.c @@ -2549,7 +2549,7 @@ static int snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi, static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi) { - struct snd_card *card = asihpi->card; + struct snd_card *card; unsigned int idx = 0; unsigned int subindex = 0; int err; @@ -2557,6 +2557,7 @@ static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi) if (snd_BUG_ON(!asihpi)) return -EINVAL; + card = asihpi->card; strcpy(card->mixername, "Asihpi Mixer"); err = diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 97c68dd24ef5..4aba7646dd9c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ 
-173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg) "Line Out", "Speaker", "HP Out", "CD", "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", "Line In", "Aux", "Mic", "Telephony", - "SPDIF In", "Digitial In", "Reserved", "Other" + "SPDIF In", "Digital In", "Reserved", "Other" }; return jack_types[(cfg & AC_DEFCFG_DEVICE) @@ -494,7 +494,7 @@ static unsigned int get_num_conns(struct hda_codec *codec, hda_nid_t nid) int snd_hda_get_num_raw_conns(struct hda_codec *codec, hda_nid_t nid) { - return get_num_conns(codec, nid) & AC_CLIST_LENGTH; + return snd_hda_get_raw_connections(codec, nid, NULL, 0); } /** @@ -517,9 +517,6 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid, hda_nid_t prev_nid; int null_count = 0; - if (snd_BUG_ON(!conn_list || max_conns <= 0)) - return -EINVAL; - parm = get_num_conns(codec, nid); if (!parm) return 0; @@ -545,7 +542,8 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid, AC_VERB_GET_CONNECT_LIST, 0); if (parm == -1 && codec->bus->rirb_error) return -EIO; - conn_list[0] = parm & mask; + if (conn_list) + conn_list[0] = parm & mask; return 1; } @@ -580,14 +578,20 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid, continue; } for (n = prev_nid + 1; n <= val; n++) { + if (conn_list) { + if (conns >= max_conns) + return -ENOSPC; + conn_list[conns] = n; + } + conns++; + } + } else { + if (conn_list) { if (conns >= max_conns) return -ENOSPC; - conn_list[conns++] = n; + conn_list[conns] = val; } - } else { - if (conns >= max_conns) - return -ENOSPC; - conn_list[conns++] = val; + conns++; } prev_nid = val; } @@ -3140,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val) if (val & AC_DIG1_PROFESSIONAL) sbits |= IEC958_AES0_PROFESSIONAL; if (sbits & IEC958_AES0_PROFESSIONAL) { - if (sbits & AC_DIG1_EMPHASIS) + if (val & AC_DIG1_EMPHASIS) sbits |= IEC958_AES0_PRO_EMPHASIS_5015; } else { if (val & AC_DIG1_EMPHASIS) diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 7dd846380a50..d0d7ac1e99d2 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -320,7 +320,7 @@ int snd_hdmi_get_eld(struct hda_codec *codec, hda_nid_t nid, unsigned char *buf, int *eld_size) { int i; - int ret; + int ret = 0; int size; /* diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 78897d05d80f..2dbe767be16b 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -740,7 +740,7 @@ EXPORT_SYMBOL_HDA(snd_hda_activate_path); static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) { struct hda_gen_spec *spec = codec->spec; - bool changed; + bool changed = false; int i; if (!spec->power_down_unused || path->active) @@ -995,6 +995,8 @@ enum { BAD_NO_EXTRA_SURR_DAC = 0x101, /* Primary DAC shared with main surrounds */ BAD_SHARED_SURROUND = 0x100, + /* No independent HP possible */ + BAD_NO_INDEP_HP = 0x40, /* Primary DAC shared with main CLFE */ BAD_SHARED_CLFE = 0x10, /* Primary DAC shared with extra surrounds */ @@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx) return snd_hda_get_path_idx(codec, path); } +/* check whether the independent HP is available with the current config */ +static bool indep_hp_possible(struct hda_codec *codec) +{ + struct hda_gen_spec *spec = codec->spec; + struct auto_pin_cfg *cfg = &spec->autocfg; + struct nid_path *path; + int i, idx; + + if (cfg->line_out_type == AUTO_PIN_HP_OUT) + idx = spec->out_paths[0]; + else + idx = 
spec->hp_paths[0]; + path = snd_hda_get_path_from_idx(codec, idx); + if (!path) + return false; + + /* assume no path conflicts unless aamix is involved */ + if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid)) + return true; + + /* check whether output paths contain aamix */ + for (i = 0; i < cfg->line_outs; i++) { + if (spec->out_paths[i] == idx) + break; + path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); + if (path && is_nid_contained(path, spec->mixer_nid)) + return false; + } + for (i = 0; i < cfg->speaker_outs; i++) { + path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]); + if (path && is_nid_contained(path, spec->mixer_nid)) + return false; + } + + return true; +} + /* fill the empty entries in the dac array for speaker/hp with the * shared dac pointed by the paths */ @@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec, badness += BAD_MULTI_IO; } + if (spec->indep_hp && !indep_hp_possible(codec)) + badness += BAD_NO_INDEP_HP; + /* re-fill the shared DAC for speaker / headphone */ if (cfg->line_out_type != AUTO_PIN_HP_OUT) refill_shared_dacs(codec, cfg->hp_outs, @@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec) cfg->speaker_pins, val); } + /* clear indep_hp flag if not available */ + if (spec->indep_hp && !indep_hp_possible(codec)) + spec->indep_hp = 0; + kfree(best_cfg); return 0; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4cea6bb6fade..bcd40ee488e3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -134,8 +134,8 @@ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " * this may give more power-saving, but will take longer time to * wake up. */ -static int power_save_controller = -1; -module_param(power_save_controller, bint, 0644); +static bool power_save_controller = 1; +module_param(power_save_controller, bool, 0644); MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); #endif /* CONFIG_PM */ @@ -415,6 +415,8 @@ struct azx_dev { unsigned int opened :1; unsigned int running :1; unsigned int irq_pending :1; + unsigned int prepared:1; + unsigned int locked:1; /* * For VIA: * A flag to ensure DMA position is 0 @@ -426,8 +428,25 @@ struct azx_dev { struct timecounter azx_tc; struct cyclecounter azx_cc; + +#ifdef CONFIG_SND_HDA_DSP_LOADER + struct mutex dsp_mutex; +#endif }; +/* DSP lock helpers */ +#ifdef CONFIG_SND_HDA_DSP_LOADER +#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex) +#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex) +#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex) +#define dsp_is_locked(dev) ((dev)->locked) +#else +#define dsp_lock_init(dev) do {} while (0) +#define dsp_lock(dev) do {} while (0) +#define dsp_unlock(dev) do {} while (0) +#define dsp_is_locked(dev) 0 +#endif + /* CORB/RIRB */ struct azx_rb { u32 *buf; /* CORB/RIRB buffer @@ -527,6 +546,10 @@ struct azx { /* card list (for power_save trigger) */ struct list_head list; + +#ifdef CONFIG_SND_HDA_DSP_LOADER + struct azx_dev saved_azx_dev; +#endif }; #define CREATE_TRACE_POINTS @@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) dev = chip->capture_index_offset; nums = chip->capture_streams; } - for (i = 0; i < nums; i++, dev++) - if (!chip->azx_dev[dev].opened) { - res = &chip->azx_dev[dev]; - if (res->assigned_key == key) - break; + for (i = 0; i < nums; i++, dev++) { + struct azx_dev *azx_dev = &chip->azx_dev[dev]; + dsp_lock(azx_dev); + if (!azx_dev->opened && 
!dsp_is_locked(azx_dev)) { + res = azx_dev; + if (res->assigned_key == key) { + res->opened = 1; + res->assigned_key = key; + dsp_unlock(azx_dev); + return azx_dev; + } } + dsp_unlock(azx_dev); + } if (res) { + dsp_lock(res); res->opened = 1; res->assigned_key = key; + dsp_unlock(res); } return res; } @@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, struct azx_dev *azx_dev = get_azx_dev(substream); int ret; + dsp_lock(azx_dev); + if (dsp_is_locked(azx_dev)) { + ret = -EBUSY; + goto unlock; + } + mark_runtime_wc(chip, azx_dev, substream, false); azx_dev->bufsize = 0; azx_dev->period_bytes = 0; @@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (ret < 0) - return ret; + goto unlock; mark_runtime_wc(chip, azx_dev, substream, true); + unlock: + dsp_unlock(azx_dev); return ret; } @@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream) struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; /* reset BDL address */ - azx_sd_writel(azx_dev, SD_BDLPL, 0); - azx_sd_writel(azx_dev, SD_BDLPU, 0); - azx_sd_writel(azx_dev, SD_CTL, 0); - azx_dev->bufsize = 0; - azx_dev->period_bytes = 0; - azx_dev->format_val = 0; + dsp_lock(azx_dev); + if (!dsp_is_locked(azx_dev)) { + azx_sd_writel(azx_dev, SD_BDLPL, 0); + azx_sd_writel(azx_dev, SD_BDLPU, 0); + azx_sd_writel(azx_dev, SD_CTL, 0); + azx_dev->bufsize = 0; + azx_dev->period_bytes = 0; + azx_dev->format_val = 0; + } snd_hda_codec_cleanup(apcm->codec, hinfo, substream); mark_runtime_wc(chip, azx_dev, substream, false); + azx_dev->prepared = 0; + dsp_unlock(azx_dev); return snd_pcm_lib_free_pages(substream); } @@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); unsigned short ctls = spdif ? 
spdif->ctls : 0; + dsp_lock(azx_dev); + if (dsp_is_locked(azx_dev)) { + err = -EBUSY; + goto unlock; + } + azx_stream_reset(chip, azx_dev); format_val = snd_hda_calc_stream_format(runtime->rate, runtime->channels, @@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) snd_printk(KERN_ERR SFX "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); - return -EINVAL; + err = -EINVAL; + goto unlock; } bufsize = snd_pcm_lib_buffer_bytes(substream); @@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) azx_dev->no_period_wakeup = runtime->no_period_wakeup; err = azx_setup_periods(chip, substream, azx_dev); if (err < 0) - return err; + goto unlock; } /* wallclk has 24Mhz clock source */ @@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && stream_tag > chip->capture_streams) stream_tag -= chip->capture_streams; - return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, + err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, azx_dev->format_val, substream); + + unlock: + if (!err) + azx_dev->prepared = 1; + dsp_unlock(azx_dev); + return err; } static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) @@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) azx_dev = get_azx_dev(substream); trace_azx_pcm_trigger(chip, azx_dev, cmd); + if (dsp_is_locked(azx_dev) || !azx_dev->prepared) + return -EPIPE; + switch (cmd) { case SNDRV_PCM_TRIGGER_START: rstart = 1; @@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, struct azx_dev *azx_dev; int err; - if (snd_hda_lock_devices(bus)) - return -EBUSY; + azx_dev = azx_get_dsp_loader_dev(chip); + + dsp_lock(azx_dev); + spin_lock_irq(&chip->reg_lock); + if (azx_dev->running || azx_dev->locked) { + spin_unlock_irq(&chip->reg_lock); + err = -EBUSY; + goto unlock; + } + azx_dev->prepared = 0; + chip->saved_azx_dev = *azx_dev; + azx_dev->locked = 1; + spin_unlock_irq(&chip->reg_lock); err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), byte_size, bufp); if (err < 0) - goto unlock; + goto err_alloc; mark_pages_wc(chip, bufp, true); - azx_dev = azx_get_dsp_loader_dev(chip); azx_dev->bufsize = byte_size; azx_dev->period_bytes = byte_size; azx_dev->format_val = format; @@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, goto error; azx_setup_controller(chip, azx_dev); + dsp_unlock(azx_dev); return azx_dev->stream_tag; error: mark_pages_wc(chip, bufp, false); snd_dma_free_pages(bufp); -unlock: - snd_hda_unlock_devices(bus); + err_alloc: + spin_lock_irq(&chip->reg_lock); + if (azx_dev->opened) + *azx_dev = chip->saved_azx_dev; + azx_dev->locked = 0; + spin_unlock_irq(&chip->reg_lock); + unlock: + dsp_unlock(azx_dev); return err; } @@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, struct azx *chip = bus->private_data; struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); - if (!dmab->area) + if (!dmab->area || !azx_dev->locked) return; + dsp_lock(azx_dev); /* reset BDL address */ azx_sd_writel(azx_dev, SD_BDLPL, 0); azx_sd_writel(azx_dev, SD_BDLPU, 0); @@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, snd_dma_free_pages(dmab); dmab->area = NULL; - snd_hda_unlock_devices(bus); + spin_lock_irq(&chip->reg_lock); + if 
(azx_dev->opened) + *azx_dev = chip->saved_azx_dev; + azx_dev->locked = 0; + spin_unlock_irq(&chip->reg_lock); + dsp_unlock(azx_dev); } #endif /* CONFIG_SND_HDA_DSP_LOADER */ @@ -2846,8 +2931,6 @@ static int azx_runtime_idle(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; - if (power_save_controller > 0) - return 0; if (!power_save_controller || !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) return -EBUSY; @@ -3481,6 +3564,7 @@ static int azx_first_init(struct azx *chip) } for (i = 0; i < chip->num_streams; i++) { + dsp_lock_init(&chip->azx_dev[i]); /* allocate memory for the BDL for each stream */ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index eefc4563b2f9..0792b5725f9c 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -3239,7 +3239,7 @@ static int ca0132_set_vipsource(struct hda_codec *codec, int val) struct ca0132_spec *spec = codec->spec; unsigned int tmp; - if (!dspload_is_loaded(codec)) + if (spec->dsp_state != DSP_DOWNLOADED) return 0; /* if CrystalVoice if off, vipsource should be 0 */ @@ -4267,11 +4267,12 @@ static void ca0132_refresh_widget_caps(struct hda_codec *codec) */ static void ca0132_setup_defaults(struct hda_codec *codec) { + struct ca0132_spec *spec = codec->spec; unsigned int tmp; int num_fx; int idx, i; - if (!dspload_is_loaded(codec)) + if (spec->dsp_state != DSP_DOWNLOADED) return; /* out, in effects + voicefx */ @@ -4351,12 +4352,16 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec) return false; dsp_os_image = (struct dsp_image_seg *)(fw_entry->data); - dspload_image(codec, dsp_os_image, 0, 0, true, 0); + if (dspload_image(codec, dsp_os_image, 0, 0, true, 0)) { + pr_err("ca0132 dspload_image failed.\n"); + goto exit_download; + } + dsp_loaded = dspload_wait_loaded(codec); +exit_download: release_firmware(fw_entry); - return dsp_loaded; } @@ -4367,16 +4372,13 @@ static void ca0132_download_dsp(struct hda_codec *codec) #ifndef CONFIG_SND_HDA_CODEC_CA0132_DSP return; /* NOP */ #endif - spec->dsp_state = DSP_DOWNLOAD_INIT; - if (spec->dsp_state == DSP_DOWNLOAD_INIT) { - chipio_enable_clocks(codec); - spec->dsp_state = DSP_DOWNLOADING; - if (!ca0132_download_dsp_images(codec)) - spec->dsp_state = DSP_DOWNLOAD_FAILED; - else - spec->dsp_state = DSP_DOWNLOADED; - } + chipio_enable_clocks(codec); + spec->dsp_state = DSP_DOWNLOADING; + if (!ca0132_download_dsp_images(codec)) + spec->dsp_state = DSP_DOWNLOAD_FAILED; + else + spec->dsp_state = DSP_DOWNLOADED; if (spec->dsp_state == DSP_DOWNLOADED) ca0132_set_dsp_msr(codec, true); diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 72ebb8a36b13..0d9c58f13560 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec) snd_hda_gen_update_outputs(codec); if (spec->gpio_eapd_hp) { - unsigned int gpio = spec->gen.hp_jack_present ? + spec->gpio_data = spec->gen.hp_jack_present ? 
spec->gpio_eapd_hp : spec->gpio_eapd_speaker; snd_hda_codec_write(codec, 0x01, 0, - AC_VERB_SET_GPIO_DATA, gpio); + AC_VERB_SET_GPIO_DATA, spec->gpio_data); } } @@ -506,6 +506,8 @@ static int patch_cs420x(struct hda_codec *codec) if (!spec) return -ENOMEM; + spec->gen.automute_hook = cs_automute; + snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, cs420x_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); @@ -893,6 +895,8 @@ static int patch_cs4210(struct hda_codec *codec) if (!spec) return -ENOMEM; + spec->gen.automute_hook = cs_automute; + snd_hda_pick_fixup(codec, cs421x_models, cs421x_fixup_tbl, cs421x_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 941bf6c766ec..2a89d1eefeb6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec) } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec) } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec) } if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); return 0; } @@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec) return 0; } +static void cx_auto_free(struct hda_codec *codec) +{ + snd_hda_detach_beep_device(codec); + snd_hda_gen_free(codec); +} + static const struct hda_codec_ops cx_auto_patch_ops = { .build_controls = cx_auto_build_controls, .build_pcms = snd_hda_gen_build_pcms, .init = snd_hda_gen_init, - .free = snd_hda_gen_free, + .free = cx_auto_free, .unsol_event = snd_hda_jack_unsol_event, #ifdef CONFIG_PM .check_power_status = snd_hda_gen_check_power_status, @@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec) codec->patch_ops = cx_auto_patch_ops; if (spec->beep_amp) - snd_hda_attach_beep_device(codec, spec->beep_amp); + snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); /* Some laptops with Conexant chips show stalls in S3 resume, * which falls into the single-cmd mode. 
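
The Conexant hunks above make two paired changes: snd_hda_attach_beep_device() is now handed only the widget NID (extracted with get_amp_nid_(), since spec->beep_amp also encodes direction/index bits), and a driver-specific .free callback detaches that beep device before delegating to the generic cleanup. A minimal sketch of the pattern follows; the spec structure and all names other than the HDA calls visible in the hunks are illustrative assumptions, not the driver's actual layout.

/* stand-in for the driver's private spec; only beep_amp matters here */
struct cx_sketch_spec {
	unsigned int beep_amp;	/* composite amp value, 0 if no beep widget */
};

static int cx_sketch_attach_beep(struct hda_codec *codec,
				 struct cx_sketch_spec *spec)
{
	if (!spec->beep_amp)
		return 0;
	/* pass the bare NID; get_amp_nid_() masks off direction/index bits */
	return snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
}

static void cx_sketch_free(struct hda_codec *codec)
{
	/* tear down the beep device before the generic code frees codec->spec */
	snd_hda_detach_beep_device(codec);
	snd_hda_gen_free(codec);
}

static const struct hda_codec_ops cx_sketch_patch_ops = {
	.free		= cx_sketch_free,
	/* .build_controls, .build_pcms, .init, ... as in the hunk above */
};

Pairing the attach at probe time with a matching detach in .free is exactly what the cx_auto_free() addition provides; without it the beep device would outlive the codec's private data when the generic free runs.
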
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78e1827d0a95..de8ac5c07fd0 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1196,7 +1196,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) _snd_printd(SND_PR_VERBOSE, "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", - codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); + codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid); if (eld->eld_valid) { if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 563c24df4d6f..f15c36bde540 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3440,7 +3440,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec) const hda_nid_t *ssids; if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || - codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670) + codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 || + codec->vendor_id == 0x10ec0671) ssids = alc663_ssids; else ssids = alc662_ssids; @@ -3894,6 +3895,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, + { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 83d5335ac348..dafe04ae8c72 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -815,6 +815,29 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity) return 0; } +/* check whether a built-in speaker is included in parsed pins */ +static bool has_builtin_speaker(struct hda_codec *codec) +{ + struct sigmatel_spec *spec = codec->spec; + hda_nid_t *nid_pin; + int nids, i; + + if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { + nid_pin = spec->gen.autocfg.line_out_pins; + nids = spec->gen.autocfg.line_outs; + } else { + nid_pin = spec->gen.autocfg.speaker_pins; + nids = spec->gen.autocfg.speaker_outs; + } + + for (i = 0; i < nids; i++) { + unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid_pin[i]); + if (snd_hda_get_input_pin_attr(def_conf) == INPUT_PIN_ATTR_INT) + return true; + } + return false; +} + /* * PC beep controls */ @@ -3890,6 +3913,12 @@ static int patch_stac92hd73xx(struct hda_codec *codec) return err; } + /* Don't GPIO-mute speakers if there are no internal speakers, because + * the GPIO might be necessary for Headphone + */ + if (spec->eapd_switch && !has_builtin_speaker(codec)) + spec->eapd_switch = 0; + codec->proc_widget_hook = stac92hd7x_proc_hook; snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index fc176044994d..fc176044994d 100755..100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 7e103f249053..7e103f249053 100755..100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index 
f2d61a187830..566ea3256e2d 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S8:
 		width = SI476X_PCM_FORMAT_S8;
+		break;
 	case SNDRV_PCM_FORMAT_S16_LE:
 		width = SI476X_PCM_FORMAT_S16_LE;
 		break;
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index b82bbf584146..34d0201d6a78 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	struct arizona *arizona = dev_get_drvdata(codec->dev);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
 	struct regmap *regmap = codec->control_data;
 	const struct reg_default *patch = NULL;
 	int i, patch_size;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 134e41c870b9..f8a31ad0b203 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
 	{ "ROP", NULL, "Right Speaker PGA" },
 	{ "RON", NULL, "Right Speaker PGA" },
 
+	{ "Charge Pump", NULL, "CLK_DSP" },
+
 	{ "Left Headphone Output PGA", NULL, "Charge Pump" },
 	{ "Right Headphone Output PGA", NULL, "Charge Pump" },
 	{ "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f3f7e75f8628..9af1bddc4c62 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 					     &buf_list);
 			if (!buf) {
 				adsp_err(dsp, "Out of memory\n");
-				return -ENOMEM;
+				ret = -ENOMEM;
+				goto out_fw;
 			}
 
 			adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
@@ -865,7 +866,7 @@ out_fw:
 	wm_adsp_buf_free(&buf_list);
 out:
 	kfree(file);
-	return 0;
+	return ret;
 }
 
 int wm_adsp1_init(struct wm_adsp *adsp)
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 55464a5b0706..810c7eeb7b03 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -496,6 +496,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
 
 	if (imx_ssi->ac97_reset)
 		imx_ssi->ac97_reset(ac97);
+	/* First read sometimes fails, do a dummy read */
+	imx_ssi_ac97_read(ac97, 0);
 }
 
 static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
@@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
 
 	if (imx_ssi->ac97_warm_reset)
 		imx_ssi->ac97_warm_reset(ac97);
+
+	/* First read sometimes fails, do a dummy read */
+	imx_ssi_ac97_read(ac97, 0);
 }
 
 struct snd_ac97_bus_ops soc_ac97_ops = {
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 8e52c1485df3..eb4373840bb6 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = {
 	.num_links = ARRAY_SIZE(pcm030_fabric_dai),
 };
 
-static int __init pcm030_fabric_probe(struct platform_device *op)
+static int pcm030_fabric_probe(struct platform_device *op)
 {
 	struct device_node *np = op->dev.of_node;
 	struct device_node *platform_np;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d7231e336a7c..6bbeb0bf1a73 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
 static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 {
 	struct i2s_dai *i2s;
+	int ret;
 
 	i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
 	if (i2s == NULL)
@@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 		i2s->i2s_dai_drv.capture.channels_max = 2;
 		i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
 		i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+		dev_set_drvdata(&i2s->pdev->dev, i2s);
 	} else {	/* Create a new platform_device for Secondary */
-		i2s->pdev = platform_device_register_resndata(NULL,
-				"samsung-i2s-sec", -1, NULL, 0, NULL, 0);
+		i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
 		if (IS_ERR(i2s->pdev))
 			return NULL;
-	}
 
-	/* Pre-assign snd_soc_dai_set_drvdata */
-	dev_set_drvdata(&i2s->pdev->dev, i2s);
+		platform_set_drvdata(i2s->pdev, i2s);
+		ret = platform_device_add(i2s->pdev);
+		if (ret < 0)
+			return NULL;
+	}
 
 	return i2s;
 }
@@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 
 	if (samsung_dai_type == TYPE_SEC) {
 		sec_dai = dev_get_drvdata(&pdev->dev);
+		if (!sec_dai) {
+			dev_err(&pdev->dev, "Unable to get drvdata\n");
+			return -EFAULT;
+		}
 		snd_soc_register_dai(&sec_dai->pdev->dev,
 			&sec_dai->i2s_dai_drv);
 		asoc_dma_platform_register(&pdev->dev);
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 19eff8fc4fdd..1a8b03e4b41b 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
 	return 0;
 }
 
-static struct snd_soc_platform sh7760_soc_platform = {
-	.pcm_ops = &camelot_pcm_ops,
+static struct snd_soc_platform_driver sh7760_soc_platform = {
+	.ops = &camelot_pcm_ops,
 	.pcm_new = camelot_pcm_new,
 	.pcm_free = camelot_pcm_free,
 };
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index b5b3db71e253..ed0bfb0ddb96 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
 	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
 		ret = platform->driver->compr_ops->set_params(cstream, params);
 		if (ret < 0)
-			goto out;
+			goto err;
 	}
 
 	if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
 		ret = rtd->dai_link->compr_ops->set_params(cstream);
 		if (ret < 0)
-			goto out;
+			goto err;
 	}
 
 	snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
 				SND_SOC_DAPM_STREAM_START);
 
-out:
+	/* cancel any delayed stream shutdown that is pending */
+	rtd->pop_wait = 0;
+	mutex_unlock(&rtd->pcm_mutex);
+
+	cancel_delayed_work_sync(&rtd->delayed_work);
+
+	return ret;
+
+err:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b7e84a7cd9ee..ff4b45a5d796 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
 	val = val << shift;
 
 	ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
-	if (ret != 0)
+	if (ret < 0)
 		return ret;
 
 	if (snd_soc_volsw_is_stereo(mc)) {
@@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
 
 	if (params->mask) {
 		ret = regmap_read(codec->control_data, params->base, &val);
 		if (ret != 0)
-			return ret;
+			goto out;
 
 		val &= params->mask;
@@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
 			((u32 *)data)[0] |= cpu_to_be32(val);
 			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 	}
 
 	ret = regmap_raw_write(codec->control_data, params->base,
 			       data, len);
 
+out:
 	kfree(data);
 
 	return ret;
@@ -4197,7 +4199,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 			dev_err(card->dev,
 				"ASoC: Property '%s' index %d could not be read: %d\n",
 				propname, 2 * i, ret);
-			kfree(routes);
 			return -EINVAL;
 		}
 		ret = of_property_read_string_index(np, propname,
@@ -4206,7 +4207,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 			dev_err(card->dev,
 				"ASoC: Property '%s' index %d could not be read: %d\n",
 				propname, (2 * i) + 1, ret);
-			kfree(routes);
 			return -EINVAL;
 		}
 	}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 1d6a9b3ceb27..d6d9ba2e6916 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 		if (path->weak)
 			continue;
 
+		if (path->walking)
+			return 1;
+
 		if (path->walked)
 			continue;
 
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 
 		if (path->sink && path->connect) {
 			path->walked = 1;
+			path->walking = 1;
 
 			/* do we need to add this widget to the list ? */
 			if (list) {
@@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 					dev_err(widget->dapm->dev,
 						"ASoC: could not add widget %s\n",
 						widget->name);
+					path->walking = 0;
 					return con;
 				}
 			}
 
 			con += is_connected_output_ep(path->sink, list);
+
+			path->walking = 0;
 		}
 	}
 
@@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 		if (path->weak)
 			continue;
 
+		if (path->walking)
+			return 1;
+
 		if (path->walked)
 			continue;
 
@@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 
 		if (path->source && path->connect) {
 			path->walked = 1;
+			path->walking = 1;
 
 			/* do we need to add this widget to the list ? */
 			if (list) {
@@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 					dev_err(widget->dapm->dev,
 						"ASoC: could not add widget %s\n",
 						widget->name);
+					path->walking = 0;
 					return con;
 				}
 			}
 
 			con += is_connected_input_ep(path->source, list);
+
+			path->walking = 0;
 		}
 	}
 
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index 9b76cc5a1148..5e7aebe1e664 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -149,9 +149,9 @@ static void spear_pcm_free(struct snd_pcm *pcm)
 
 static u64 spear_pcm_dmamask = DMA_BIT_MASK(32);
 
-static int spear_pcm_new(struct snd_card *card,
-		struct snd_soc_dai *dai, struct snd_pcm *pcm)
+static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
+	struct snd_card *card = rtd->card->snd_card;
 	int ret;
 
 	if (!card->dev->dma_mask)
@@ -159,16 +159,16 @@ static int spear_pcm_new(struct snd_card *card,
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-	if (dai->driver->playback.channels_min) {
-		ret = spear_pcm_preallocate_dma_buffer(pcm,
+	if (rtd->cpu_dai->driver->playback.channels_min) {
+		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
 				SNDRV_PCM_STREAM_PLAYBACK,
 				spear_pcm_hardware.buffer_bytes_max);
 		if (ret)
 			return ret;
 	}
 
-	if (dai->driver->capture.channels_min) {
-		ret = spear_pcm_preallocate_dma_buffer(pcm,
+	if (rtd->cpu_dai->driver->capture.channels_min) {
+		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
 				SNDRV_PCM_STREAM_CAPTURE,
 				spear_pcm_hardware.buffer_bytes_max);
 		if (ret)
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index c925ab0adeb6..5e2c55c5b255 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -43,8 +43,6 @@
 static const struct snd_pcm_hardware tegra_pcm_hardware = {
 	.info = SNDRV_PCM_INFO_MMAP |
 		SNDRV_PCM_INFO_MMAP_VALID |
-		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_RESUME |
 		SNDRV_PCM_INFO_INTERLEAVED,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	.channels_min = 2,
@@ -127,26 +125,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
 	return 0;
 }
 
-static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		return snd_dmaengine_pcm_trigger(substream,
-			SNDRV_PCM_TRIGGER_START);
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		return snd_dmaengine_pcm_trigger(substream,
-			SNDRV_PCM_TRIGGER_STOP);
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
 static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
 			  struct vm_area_struct *vma)
 {
@@ -164,7 +142,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {
 	.ioctl = snd_pcm_lib_ioctl,
 	.hw_params = tegra_pcm_hw_params,
 	.hw_free = tegra_pcm_hw_free,
-	.trigger = tegra_pcm_trigger,
+	.trigger = snd_dmaengine_pcm_trigger,
 	.pointer = snd_dmaengine_pcm_pointer,
 	.mmap = tegra_pcm_mmap,
 };
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 803953a9bff3..2da8ad75fd96 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -244,6 +244,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 		usb_ifnum_to_if(dev, ctrlif)->intf_assoc;
 
 	if (!assoc) {
+		/*
+		 * Firmware writers cannot count to three.  So to find
+		 * the IAD on the NuForce UDH-100, also check the next
+		 * interface.
+		 */
+		struct usb_interface *iface =
+			usb_ifnum_to_if(dev, ctrlif + 1);
+		if (iface &&
+		    iface->intf_assoc &&
+		    iface->intf_assoc->bFunctionClass == USB_CLASS_AUDIO &&
+		    iface->intf_assoc->bFunctionProtocol == UAC_VERSION_2)
+			assoc = iface->intf_assoc;
+	}
+
+	if (!assoc) {
 		snd_printk(KERN_ERR "Audio class v2 interfaces need an interface association\n");
 		return -EINVAL;
 	}
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 5e634a2eb282..9e2703a25156 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -253,7 +253,7 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
 {
 	struct usb_device *dev = chip->dev;
 	unsigned char data[4];
-	int err, crate;
+	int err, cur_rate, prev_rate;
 	int clock = snd_usb_clock_find_source(chip, fmt->clock);
 
 	if (clock < 0)
@@ -266,6 +266,19 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
 		return -ENXIO;
 	}
 
+	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			      UAC2_CS_CONTROL_SAM_FREQ << 8,
+			      snd_usb_ctrl_intf(chip) | (clock << 8),
+			      data, sizeof(data));
+	if (err < 0) {
+		snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
+			   dev->devnum, iface, fmt->altsetting);
+		prev_rate = 0;
+	} else {
+		prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
+	}
+
 	data[0] = rate;
 	data[1] = rate >> 8;
 	data[2] = rate >> 16;
@@ -280,19 +293,31 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
 		return err;
 	}
 
-	if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
-				   USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
-				   UAC2_CS_CONTROL_SAM_FREQ << 8,
-				   snd_usb_ctrl_intf(chip) | (clock << 8),
-				   data, sizeof(data))) < 0) {
+	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			      UAC2_CS_CONTROL_SAM_FREQ << 8,
+			      snd_usb_ctrl_intf(chip) | (clock << 8),
+			      data, sizeof(data));
+	if (err < 0) {
 		snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n",
 			   dev->devnum, iface, fmt->altsetting);
-		return err;
+		cur_rate = 0;
+	} else {
+		cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
 	}
 
-	crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
-	if (crate != rate)
-		snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
+	if (cur_rate != rate) {
+		snd_printd(KERN_WARNING
+			   "current rate %d is different from the runtime rate %d\n",
+			   cur_rate, rate);
+	}
+
+	/* Some devices doesn't respond to sample rate changes while the
+	 * interface is active. */
+	if (rate != prev_rate) {
+		usb_set_interface(dev, iface, 0);
+		usb_set_interface(dev, iface, fmt->altsetting);
+	}
 
 	return 0;
 }
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 638e7f738018..ca4739c3f650 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
 		case UAC2_CLOCK_SELECTOR: {
 			struct uac_selector_unit_descriptor *d = p1;
 			/* call recursively to retrieve the channel info */
-			if (check_input_term(state, d->baSourceID[0], term) < 0)
-				return -ENODEV;
+			err = check_input_term(state, d->baSourceID[0], term);
+			if (err < 0)
+				return err;
 			term->type = d->bDescriptorSubtype << 16; /* virtual type */
 			term->id = id;
 			term->name = uac_selector_unit_iSelector(d);
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
 		case UAC1_PROCESSING_UNIT:
 		case UAC1_EXTENSION_UNIT:
 		/* UAC2_PROCESSING_UNIT_V2 */
-		/* UAC2_EFFECT_UNIT */ {
+		/* UAC2_EFFECT_UNIT */
+		case UAC2_EXTENSION_UNIT_V2: {
 			struct uac_processing_unit_descriptor *d = p1;
 
 			if (state->mixer->protocol == UAC_VERSION_2 &&
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
 		return err;
 
 	/* determine the input source type and name */
-	if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
-		return -EINVAL;
+	err = check_input_term(state, hdr->bSourceID, &iterm);
+	if (err < 0)
+		return err;
 
 	master_bits = snd_usb_combine_bytes(bmaControls, csize);
 	/* master configuration quirks */
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
 			return parse_audio_extension_unit(state, unitid, p1);
 		else /* UAC_VERSION_2 */
 			return parse_audio_processing_unit(state, unitid, p1);
+	case UAC2_EXTENSION_UNIT_V2:
+		return parse_audio_extension_unit(state, unitid, p1);
 	default:
 		snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
 		return -EINVAL;
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 			state.oterm.type = le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 		} else { /* UAC_VERSION_2 */
 			struct uac2_output_terminal_descriptor *desc = p;
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 			state.oterm.type = le16_to_cpu(desc->wTerminalType);
 			state.oterm.name = desc->iTerminal;
 			err = parse_audio_unit(&state, desc->bSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 
 			/* for UAC2, use the same approach to also add the clock selectors */
 			err = parse_audio_unit(&state, desc->bCSourceID);
-			if (err < 0)
+			if (err < 0 && err != -EINVAL)
 				return err;
 		}
 	}
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 497d2741d119..ebe91440a068 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
 	else
 		ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
 				  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-				  0, cpu_to_le16(wIndex),
+				  0, wIndex,
 				  &tmp, sizeof(tmp), 1000);
 	up_read(&mixer->chip->shutdown_rwsem);
 
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
 	else
 		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
				  USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-				  cpu_to_le16(wValue), cpu_to_le16(wIndex),
+				  wValue, wIndex,
 				  NULL, 0, 1000);
 	up_read(&mixer->chip->shutdown_rwsem);
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5325a3869bb7..9c5ab22358b1 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
 {
 	int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xaf,
				  USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-				  cpu_to_le16(1), 0, NULL, 0, 1000);
+				  1, 0, NULL, 0, 1000);
 
 	if (ret < 0)
 		return ret;
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index a20e32033431..0b0a90787db6 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -122,7 +122,7 @@ export Q VERBOSE
 
 EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
 
-INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
+INCLUDES = -I. $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -Wall
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index a2108ca1cc17..bb74c79cd16e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line")
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
+  CFLAGS_OPTIMIZE = -O6
 endif
 
 ifdef PARSER_DEBUG
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W
 	CFLAGS := $(CFLAGS) -Wvolatile-register-var
 endif
 
+ifndef PERF_DEBUG
+  ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+    CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2
+  endif
+endif
+
 ### --- END CONFIGURATION SECTION ---
 
 ifeq ($(srctree),)
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a5223e6a7b43..0fdc85269c4d 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -1,6 +1,30 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+/*
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+ * tokens if they are not already defined.
+ *
+ * PA-RISC uses different madvise values from other architectures and
+ * needs to be special-cased.
+ */
+#ifdef __hppa__
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 67
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 68
+# endif
+#else
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 14
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 15
+# endif
+#endif
+
 extern int bench_numa(int argc, const char **argv, const char *prefix);
 extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
 extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 774c90713a53..f1a939ebc19c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 					     perf_event__synthesize_guest_os, tool);
 	}
 
-	if (!opts->target.system_wide)
+	if (perf_target__has_task(&opts->target))
 		err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
 						  process_synthesized_event,
 						  machine);
-	else
+	else if (perf_target__has_cpu(&opts->target))
 		err = perf_event__synthesize_threads(tool, process_synthesized_event,
 					       machine);
+	else /* command specified */
+		err = 0;
 
 	if (err != 0)
 		goto out_delete_session;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 38624686ee9a..226a4ae2f936 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused)
 	return 0;
 }
 
-#define K_LEFT -1
-#define K_RIGHT -2
+#define K_LEFT -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
 #endif
 
 #ifdef GTK2_SUPPORT
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 55433aa42c8f..eabdce0a2daa 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list)
 		slist->rblist.node_delete = strlist__node_delete;
 
 		slist->dupstr = dupstr;
-		if (slist && strlist__parse_list(slist, list) != 0)
+		if (list && strlist__parse_list(slist, list) != 0)
 			goto out_error;
 	}
 
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 6f3214ed4444..321e066a0753 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1421,6 +1421,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		return 1;
 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
@@ -1515,6 +1516,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
 		break;
 	case 0x2D:
@@ -1754,6 +1756,7 @@ int is_snb(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		return 1;
 	}
 	return 0;
@@ -2276,7 +2279,7 @@ int main(int argc, char **argv)
 	cmdline(argc, argv);
 
 	if (verbose)
-		fprintf(stderr, "turbostat v3.2 February 11, 2013"
+		fprintf(stderr, "turbostat v3.3 March 15, 2013"
 			" - Len Brown <lenb@kernel.org>\n");
 
 	turbostat_init();
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 8674b9ec14f6..fe1e66b6ef40 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -38,7 +38,7 @@
 #include <unistd.h>
 
 #include <tools/le_byteshift.h>
-#include "../../include/linux/usb/functionfs.h"
+#include "../../include/uapi/linux/usb/functionfs.h"
 
 /******************** Little Endian Handling ********************************/
 
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index ce82b9401958..5ba005c00e2f 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 		u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
 		u64 redir_content;
 
-		ASSERT(redir_index < IOAPIC_NUM_PINS);
+		if (redir_index < IOAPIC_NUM_PINS)
+			redir_content =
+				ioapic->redirtbl[redir_index].bits;
+		else
+			redir_content = ~0ULL;
 
-		redir_content = ioapic->redirtbl[redir_index].bits;
 		result = (ioapic->ioregsel & 0x1) ?
 		    (redir_content >> 32) & 0xffffffff :
 		    redir_content & 0xffffffff;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index adc68feb5c5a..f18013f09e68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa)
+			      gpa_t gpa, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
-	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t start_gfn = gpa >> PAGE_SHIFT;
+	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+	gfn_t nr_pages_avail;
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = gfn_to_memslot(kvm, gfn);
-	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-	if (!kvm_is_error_hva(ghc->hva))
+	ghc->len = len;
+	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
 		ghc->hva += offset;
-	else
-		return -EFAULT;
-
+	} else {
+		/*
+		 * If the requested region crosses two memslots, we still
+		 * verify that the entire region is valid here.
+		 */
+		while (start_gfn <= end_gfn) {
+			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+						   &nr_pages_avail);
+			if (kvm_is_error_hva(ghc->hva))
+				return -EFAULT;
+			start_gfn += nr_pages_avail;
+		}
+		/* Use the slow path for cross page reads and writes. */
+		ghc->memslot = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;